# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.linalg, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)

# ==============================================================================
def ExecuteFunction( triplet ):
    "Executes a user function, re-imported from its module (multiprocessing helper)"
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
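
# A minimal sketch (illustrative only, not part of the module API) of the
# triplet that ExecuteFunction expects: "funcrepr" carries the path, module
# and name of the user function so that each worker process can re-import it.
# The path, module and function names below are hypothetical.
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/user/module",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "userfunction",
#       }
#   HX = ExecuteFunction( (X, None, funcrepr) )   # returns numpy.ravel( HX )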
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface for defining approximated operators.
    When an object is created by providing a function "Function", one obtains
    an object with three methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    by the multiplicative increment "increment" (1% by default), or by the
    fixed increment "dX" which is then multiplied by "increment" (hence in %),
    and centered finite differences are computed if the boolean "centeredDF"
    is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations : %s"%(self.__mfEnabled,))
        if avoidingRedundancy:
            self.__avoidRC  = True
            self.__tolerBP  = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct calculation using the provided function.

        NB: the "extraArgs" are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA Calculating DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Calculation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, given as a numpy.matrix.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are calculated
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dX[i])
        3/ Each result, component-wise, becomes a column of the Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is calculated
        2/ The central value HX = H(X) is calculated
        3/ The differences (HX_plus_dXi-HX) are computed and divided by
           (dX[i])
        4/ Each result, component-wise, becomes a column of the Jacobian
        """
        logging.debug("FDA Starting Jacobian calculation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Calculating Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Calculating Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA Finished Jacobian calculation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Calculation of the tangent using the Jacobian.

        NB: the "extraArgs" are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X, applied to dX
            # -------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Calculation of the adjoint using the Jacobian.

        NB: the "extraArgs" are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the adjoint value at X, applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
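
# A minimal usage sketch (illustrative only, not part of the module API):
# build an FDApproximation around a simple user function, then query the
# three operators. The function "_userH" below is hypothetical.
#
#   def _userH( x ):
#       x = numpy.ravel( x )
#       return numpy.array([x[0]**2, x[0]*x[1]])
#
#   FDA = FDApproximation( Function = _userH, centeredDF = True )
#   X   = numpy.array([1., 2.])
#   J   = FDA.TangentOperator( (X, None) )       # full Jacobian at X
#   HtX = FDA.TangentOperator( (X, [1., 0.]) )   # J @ dX, linearized direct
#   HaY = FDA.AdjointOperator( (X, [1., 1.]) )   # J.T @ Y, adjoint action
#   # consistency check: numpy.dot(HtX, [1., 1.]) == numpy.dot([1., 0.], HaY)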
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
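
# A minimal sketch (illustrative only): draw an ensemble of 5 states around a
# 3-dimensional center with identity covariance; each column is one member.
#
#   _E = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 5 )
#   # _E.shape == (3, 5)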
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random centered states around _bgcenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB and conforming to the code of PS, with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
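
# A minimal sketch (illustrative only) of the centering property built by the
# QR construction above: the anomalies sum to zero over the members, so the
# ensemble mean stays exactly on _bgcenter.
#
#   _E = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.identity(3), 10 )
#   # numpy.abs(_E.mean(axis=1)).max() is at round-off level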
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))

# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Enforce symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Enforce positivity
        __epsilon    = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
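
# A minimal sketch (illustrative only): with the 1/sqrt(m-1) normalisation
# used by the ensemble algorithms below, the anomalies directly reproduce the
# empirical covariance.
#
#   _E = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 50 )
#   _A = EnsembleOfAnomalies( _E, __Normalisation = 1./math.sqrt(_E.shape[1]-1) )
#   # (_A @ _A.T) approximates EnsembleErrorCovariance( _E )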
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Case of a covariance which is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Case of a covariance which is zero or nearly so
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Case of a covariance which is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Case of a covariance which is zero or nearly so
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Case of a covariance which is a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
        #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Case of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
        #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Case of a full covariance matrix
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
        #
    elif isinstance(__Covariance, numpy.ndarray):
        # Case of a full numpy covariance matrix, knowing that this test comes last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
        #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
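
# A minimal sketch (illustrative only): perturb a constant ensemble with a
# plain numpy covariance matrix, which is handled by the last branch above.
#
#   _E = numpy.zeros((3,10))
#   _E = EnsemblePerturbationWithGivenCovariance( _E, 0.1*numpy.identity(3), __Seed=123 )
#   # each member (column) now carries an independent N(0, 0.1*I) draw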
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa.

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
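
# A minimal sketch (illustrative only): multiplicative inflation of ensemble
# anomalies stretches each member away from the ensemble mean by the factor,
# leaving the mean itself unchanged.
#
#   _E  = numpy.array([[1., 2., 3.]])   # one variable, three members
#   _Ei = CovarianceInflation( _E, "MultiplicativeOnAnalysisAnomalies", 2. )
#   # the mean stays 2., anomalies (-1,0,1) become (-2,0,2): _Ei == [[0.,2.,4.]]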
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"]  # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = numpy.asmatrix(ForceNumericBounds( LBounds ))
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
        HXa = numpy.matrix(numpy.ravel( HXa )).T
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
            dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]-Xa)),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]-Xa)),axis=1)
            dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
            Yr = HXa + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
                Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
            Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
        else:
            raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
        else:
            YfQ = numpy.hstack((YfQ,Yr))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice]
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
    selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    #
    return 0
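
# A minimal note (illustrative only) on the empirical quantile extraction
# used above: after the member-wise sort of the sampled outputs YfQ, the
# quantile q is read at column index int(nbsamples*q - 1/nbsamples).
#
#   # e.g. with nbsamples = 100 and q = 0.5, the median is read at index 49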
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Forces the bounds to be numerical values, except if None"
    # Keeps a default value of None if there are no bounds
    if __Bounds is None: return None
    # Converts every individual None bound to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0:
        raise ValueError("Incorrectly defined bounds data")
    __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds)[:,1],1] =  sys.float_info.max
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenters the bounds around the given center, except if None"
    # Keeps a default value of None if there are no bounds
    if __Bounds is None: return None
    # Recenters the numerical values of the bounds
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
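
# A minimal sketch (illustrative only): None bounds become +/- the largest
# float, and recentring shifts both bound columns by the given center.
#
#   _B = ForceNumericBounds( [[0., None], [None, 10.]] )
#   # _B == [[0., +sys.float_info.max], [-sys.float_info.max, 10.]]
#   _Br = RecentredBounds( [[0., 2.], [1., 3.]], [1., 1.] )
#   # _Br == [[-1., 1.], [0., 2.]]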
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precomputation of the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorisation is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For the 1-dimensional case
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Conservation of the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses, incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / math.sqrt(__m-1)
            mS    = mS.reshape((-1,__m)) # For the 1-dimensional case
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            #
            EaX   = EaX / math.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Additional final storage of the optimum, in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
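
# A minimal standalone sketch (illustrative only) of the "KalmanFilterFormula"
# analysis step used in etkf above, written in plain numpy for a linear
# observation operator: the update acts in the m-dimensional ensemble space
# through mS, mT and vw. All values below are hypothetical.
#
#   __m = 4 ; Ef = numpy.random.randn(3,__m)    # 3 variables, 4 members
#   Hf  = Ef[:2,:]                              # hypothetical linear observation
#   RIdemi = numpy.identity(2)                  # R = I, hence RIdemi = I
#   Ynpu = numpy.ones((2,1))
#   Xfm, Hfm = Ef.mean(1,keepdims=True), Hf.mean(1,keepdims=True)
#   mS = RIdemi @ (Hf - Hfm) / math.sqrt(__m-1)
#   mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
#   vw = mT @ mS.T @ (RIdemi @ (Ynpu - Hfm))
#   Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
#   Xa = Xfm + (Ef - Xfm)/math.sqrt(__m-1) @ (vw + math.sqrt(__m-1)*Tdemi)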
# ==============================================================================
def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.asmatrix(numpy.ravel( M( (Xn, Un) ) )).T
            if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Beware: if Cm is also inside H, it is applied twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Xa = Xn # Pointer to the result
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum, in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
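
# A minimal standalone sketch (illustrative only) of the gain computation used
# in exkf above, for one scalar observation in plain numpy:
#
#   Pf = numpy.array([[1.]]) ; R1 = numpy.array([[1.]])   # forecast / observation covariances
#   Ht1 = Ha1 = numpy.array([[1.]])                       # tangent and adjoint of H
#   Kn = Pf @ Ha1 @ numpy.linalg.inv( R1 + Ht1 @ (Pf @ Ha1) )
#   # Kn == [[0.5]] : with Pf == R, the analysis splits the innovation evenly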
1431 # ==============================================================================
1432 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1433 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1437 if selfA._parameters["EstimationOf"] == "Parameters":
1438 selfA._parameters["StoreInternalVariables"] = True
1441 H = HO["Direct"].appliedControledFormTo
1443 if selfA._parameters["EstimationOf"] == "State":
1444 M = EM["Direct"].appliedControledFormTo
1446 if CM is not None and "Tangent" in CM and U is not None:
1447 Cm = CM["Tangent"].asMatrix(Xb)
1451 # Durée d'observation et tailles
1452 if hasattr(Y,"stepnumber"):
1453 duration = Y.stepnumber()
1454 __p = numpy.cumprod(Y.shape())[-1]
1457 __p = numpy.array(Y).size
1459 # Précalcul des inversions de B et R
1460 if selfA._parameters["StoreInternalVariables"] \
1461 or selfA._toStore("CostFunctionJ") \
1462 or selfA._toStore("CostFunctionJb") \
1463 or selfA._toStore("CostFunctionJo") \
1464 or selfA._toStore("CurrentOptimum") \
1465 or selfA._toStore("APosterioriCovariance"):
1470 __m = selfA._parameters["NumberOfMembers"]
1472 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1473 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1475 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1476 selfA.StoredVariables["Analysis"].store( Xb )
1477 if selfA._toStore("APosterioriCovariance"):
1478 if hasattr(B,"asfullmatrix"):
1479 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1481 selfA.StoredVariables["APosterioriCovariance"].store( B )
1482 selfA._setInternalState("seed", numpy.random.get_state())
1483 elif selfA._parameters["nextStep"]:
1484 Xn = selfA._getInternalState("Xn")
1486 previousJMinimum = numpy.finfo(float).max
1488 for step in range(duration-1):
1489 numpy.random.set_state(selfA._getInternalState("seed"))
1490 if hasattr(Y,"store"):
1491 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1492 else:
1493 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1496 if hasattr(U,"store") and len(U)>1:
1497 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1498 elif hasattr(U,"store") and len(U)==1:
1499 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1500 else:
1501 Un = numpy.asmatrix(numpy.ravel( U )).T
1505 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1506 Xn = CovarianceInflation( Xn,
1507 selfA._parameters["InflationType"],
1508 selfA._parameters["InflationFactor"],
1509 )
1511 #--------------------------
1512 if VariantM == "IEnKF12":
1513 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1514 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1515 __j = 0
1516 Deltaw = 1
1517 if not BnotT:
1518 Ta = numpy.identity(__m)
1519 vw = numpy.zeros(__m)
1520 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1521 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1523 if BnotT:
1524 E1 = vx1 + _epsilon * EaX
1525 else:
1526 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1528 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1529 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1530 argsAsSerie = True,
1531 returnSerieAsArrayMatrix = True )
1532 elif selfA._parameters["EstimationOf"] == "Parameters":
1533 # --- > By principle, M = Id
1535 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1536 vy1 = H((vx2, Un)).reshape((__p,1))
1538 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1539 argsAsSerie = True,
1540 returnSerieAsArrayMatrix = True )
1541 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1543 if BnotT:
1544 EaY = (HE2 - vy2) / _epsilon
1545 else:
1546 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1548 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1549 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1550 Deltaw = - numpy.linalg.solve(mH,GradJ)
1551 vw = vw + Deltaw
1554 if not BnotT:
1555 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1557 __j = __j + 1
1559 A2 = EnsembleOfAnomalies( E2 )
1561 if BnotT:
1562 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1563 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1564 Xn = vx1 + A2
1566 #--------------------------
1567 else:
1568 raise ValueError("VariantM has to be chosen from the authorized methods list.")
1570 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1571 Xn = CovarianceInflation( Xn,
1572 selfA._parameters["InflationType"],
1573 selfA._parameters["InflationFactor"],
1574 )
1576 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1577 #--------------------------
1578 selfA._setInternalState("Xn", Xn)
1579 selfA._setInternalState("seed", numpy.random.get_state())
1580 #--------------------------
1582 if selfA._parameters["StoreInternalVariables"] \
1583 or selfA._toStore("CostFunctionJ") \
1584 or selfA._toStore("CostFunctionJb") \
1585 or selfA._toStore("CostFunctionJo") \
1586 or selfA._toStore("APosterioriCovariance") \
1587 or selfA._toStore("InnovationAtCurrentAnalysis") \
1588 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1589 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1591 _Innovation = Ynpu - _HXa
1593 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1594 # ---> with the analysis
1595 selfA.StoredVariables["Analysis"].store( Xa )
1596 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1597 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1598 if selfA._toStore("InnovationAtCurrentAnalysis"):
1599 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1600 # ---> with the current state
1601 if selfA._parameters["StoreInternalVariables"] \
1602 or selfA._toStore("CurrentState"):
1603 selfA.StoredVariables["CurrentState"].store( Xn )
1604 if selfA._toStore("ForecastState"):
1605 selfA.StoredVariables["ForecastState"].store( E2 )
1606 if selfA._toStore("ForecastCovariance"):
1607 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1608 if selfA._toStore("BMA"):
1609 selfA.StoredVariables["BMA"].store( E2 - Xa )
1610 if selfA._toStore("InnovationAtCurrentState"):
1611 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1612 if selfA._toStore("SimulatedObservationAtCurrentState") \
1613 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1616 if selfA._parameters["StoreInternalVariables"] \
1617 or selfA._toStore("CostFunctionJ") \
1618 or selfA._toStore("CostFunctionJb") \
1619 or selfA._toStore("CostFunctionJo") \
1620 or selfA._toStore("CurrentOptimum") \
1621 or selfA._toStore("APosterioriCovariance"):
1622 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1623 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1624 J  = Jb + Jo
1625 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1626 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1627 selfA.StoredVariables["CostFunctionJ" ].store( J )
1629 if selfA._toStore("IndexOfOptimum") \
1630 or selfA._toStore("CurrentOptimum") \
1631 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1632 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1633 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1634 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1635 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1636 if selfA._toStore("IndexOfOptimum"):
1637 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1638 if selfA._toStore("CurrentOptimum"):
1639 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1640 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1641 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1642 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1643 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1644 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1645 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1646 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1647 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1648 if selfA._toStore("APosterioriCovariance"):
1649 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1650 if selfA._parameters["EstimationOf"] == "Parameters" \
1651 and J < previousJMinimum:
1652 previousJMinimum = J
1653 XaMin = Xa
1654 if selfA._toStore("APosterioriCovariance"):
1655 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1657 # Final additional storage of the optimum for parameter estimation
1658 # ----------------------------------------------------------------------
1659 if selfA._parameters["EstimationOf"] == "Parameters":
1660 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1661 selfA.StoredVariables["Analysis"].store( XaMin )
1662 if selfA._toStore("APosterioriCovariance"):
1663 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1664 if selfA._toStore("BMA"):
1665 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
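# ienkf above iterates a Gauss-Newton solve in the m-dimensional ensemble
# weight space: the cost is minimized over the weights vw, with gradient
# GradJ = vw - EaY' RI (y - H(x)) and approximate Hessian
# mH = I + EaY' RI EaY, then vw <- vw - mH^-1 GradJ. A minimal standalone
# sketch of one such weight update, assuming plain numpy arrays for RI and
# the anomalies (hypothetical helper, not part of the ADAO API):
def _exampleIEnKFWeightUpdate( vw, EaY, RI, innovation ):
    "One Gauss-Newton update of the ensemble weights vw (sketch)"
    __m = vw.size
    GradJ = vw - EaY.T @ (RI @ numpy.ravel( innovation ))
    mH = numpy.identity(__m) + EaY.T @ (RI @ EaY)
    Deltaw = - numpy.linalg.solve( mH, GradJ )
    return vw + Deltaw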
1669 # ==============================================================================
1670 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1678 # Non-linear operator for the outer loop
1679 Hm = HO["Direct"].appliedTo
1681 # Precomputation of the B and R inverses
1682 BI = B.getI()
1683 RI = R.getI()
1685 # Starting point of the optimization
1686 Xini = selfA._parameters["InitializationPoint"]
1688 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1689 Innovation = Y - HXb
1693 iOuter = 0
1694 J      = 1./mpr
1695 DeltaJ = 1./mpr
1696 Xr = Xini.reshape((-1,1))
1697 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1701 Ht = HO["Tangent"].asMatrix(Xr)
1702 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1704 # Definition of the cost function
1705 # ------------------------------
1706 def CostFunction(dx):
1707 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1708 if selfA._parameters["StoreInternalVariables"] or \
1709 selfA._toStore("CurrentState") or \
1710 selfA._toStore("CurrentOptimum"):
1711 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1712 _HdX = Ht * _dX
1713 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1714 _dInnovation = Innovation - _HdX
1715 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1716 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1717 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1718 if selfA._toStore("InnovationAtCurrentState"):
1719 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1721 Jb = float( 0.5 * _dX.T * BI * _dX )
1722 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1725 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1726 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1727 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1728 selfA.StoredVariables["CostFunctionJ" ].store( J )
1729 if selfA._toStore("IndexOfOptimum") or \
1730 selfA._toStore("CurrentOptimum") or \
1731 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1732 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1733 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1734 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1735 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1736 if selfA._toStore("IndexOfOptimum"):
1737 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1738 if selfA._toStore("CurrentOptimum"):
1739 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1740 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1741 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1742 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1743 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1744 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1745 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1746 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1747 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1748 return J
1750 def GradientOfCostFunction(dx):
1751 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1752 _HdX = Ht * _dX
1753 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1754 _dInnovation = Innovation - _HdX
1755 GradJb = BI * _dX
1756 GradJo = - Ht.T @ (RI * _dInnovation)
1757 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1758 return GradJ
1760 # Minimization of the functional
1761 # --------------------------------
1762 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1764 if selfA._parameters["Minimizer"] == "LBFGSB":
1765 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1766 if "0.19" <= scipy.version.version <= "1.1.0":
1767 import lbfgsbhlt as optimiseur
1768 else:
1769 import scipy.optimize as optimiseur
1770 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1771 func = CostFunction,
1772 x0 = numpy.zeros(Xini.size),
1773 fprime = GradientOfCostFunction,
1775 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1776 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1777 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1778 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1779 iprint = selfA._parameters["optiprint"],
1780 )
1781 nfeval = Informations['funcalls']
1782 rc = Informations['warnflag']
1783 elif selfA._parameters["Minimizer"] == "TNC":
1784 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1785 func = CostFunction,
1786 x0 = numpy.zeros(Xini.size),
1787 fprime = GradientOfCostFunction,
1789 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1790 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1791 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1792 ftol = selfA._parameters["CostDecrementTolerance"],
1793 messages = selfA._parameters["optmessages"],
1794 )
1795 elif selfA._parameters["Minimizer"] == "CG":
1796 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1798 x0 = numpy.zeros(Xini.size),
1799 fprime = GradientOfCostFunction,
1801 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1802 gtol = selfA._parameters["GradientNormTolerance"],
1803 disp = selfA._parameters["optdisp"],
1806 elif selfA._parameters["Minimizer"] == "NCG":
1807 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1809 x0 = numpy.zeros(Xini.size),
1810 fprime = GradientOfCostFunction,
1812 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1813 avextol = selfA._parameters["CostDecrementTolerance"],
1814 disp = selfA._parameters["optdisp"],
1817 elif selfA._parameters["Minimizer"] == "BFGS":
1818 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1820 x0 = numpy.zeros(Xini.size),
1821 fprime = GradientOfCostFunction,
1823 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1824 gtol = selfA._parameters["GradientNormTolerance"],
1825 disp = selfA._parameters["optdisp"],
1828 else:
1829 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1831 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1832 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1834 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1835 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1836 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1837 else:
1838 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1841 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1842 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1844 # Obtaining the analysis
1845 # ----------------------
1846 Xa = Minimum
1848 selfA.StoredVariables["Analysis"].store( Xa )
1850 if selfA._toStore("OMA") or \
1851 selfA._toStore("SigmaObs2") or \
1852 selfA._toStore("SimulationQuantiles") or \
1853 selfA._toStore("SimulatedObservationAtOptimum"):
1854 if selfA._toStore("SimulatedObservationAtCurrentState"):
1855 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1856 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1857 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1858 else:
1859 HXa = Hm( Xa )
1861 # Computation of the analysis covariance
1862 # ---------------------------------
1863 if selfA._toStore("APosterioriCovariance") or \
1864 selfA._toStore("SimulationQuantiles") or \
1865 selfA._toStore("JacobianMatrixAtOptimum") or \
1866 selfA._toStore("KalmanGainAtOptimum"):
1867 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1868 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1869 if selfA._toStore("APosterioriCovariance") or \
1870 selfA._toStore("SimulationQuantiles") or \
1871 selfA._toStore("KalmanGainAtOptimum"):
1872 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1873 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1874 if selfA._toStore("APosterioriCovariance") or \
1875 selfA._toStore("SimulationQuantiles"):
1876 HessienneI = []
1877 nb = Xa.size
1878 for i in range(nb):
1879 _ee = numpy.matrix(numpy.zeros(nb)).T
1880 _ee[i] = 1.
1881 _HtEE = numpy.dot(HtM,_ee)
1882 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1883 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1884 HessienneI = numpy.matrix( HessienneI )
1885 A = HessienneI.I
1886 if min(A.shape) != max(A.shape):
1887 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1888 if (numpy.diag(A) < 0).any():
1889 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1890 if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
1891 try:
1892 L = numpy.linalg.cholesky( A )
1893 except:
1894 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1895 if selfA._toStore("APosterioriCovariance"):
1896 selfA.StoredVariables["APosterioriCovariance"].store( A )
1897 if selfA._toStore("JacobianMatrixAtOptimum"):
1898 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1899 if selfA._toStore("KalmanGainAtOptimum"):
1900 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1901 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1902 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1904 # Additional calculations and/or storages
1905 # ---------------------------------------
1906 if selfA._toStore("Innovation") or \
1907 selfA._toStore("SigmaObs2") or \
1908 selfA._toStore("MahalanobisConsistency") or \
1909 selfA._toStore("OMB"):
1910 d = Y - HXb
1911 if selfA._toStore("Innovation"):
1912 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1913 if selfA._toStore("BMA"):
1914 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1915 if selfA._toStore("OMA"):
1916 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1917 if selfA._toStore("OMB"):
1918 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1919 if selfA._toStore("SigmaObs2"):
1920 TraceR = R.trace(Y.size)
1921 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1922 if selfA._toStore("MahalanobisConsistency"):
1923 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1924 if selfA._toStore("SimulationQuantiles"):
1925 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1926 if selfA._toStore("SimulatedObservationAtBackground"):
1927 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1928 if selfA._toStore("SimulatedObservationAtOptimum"):
1929 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
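# incr3dvar above alternates an outer relinearization of the observation
# operator around the current state with an inner minimization of the
# quadratic cost J(dx) = 0.5*dx'*BI*dx + 0.5*(d-Ht*dx)'*RI*(d-Ht*dx).
# A minimal dense sketch of one exact inner solve via the normal equations,
# assuming plain numpy arrays (hypothetical helper, not the ADAO operators,
# which never form these matrices explicitly):
def _exampleIncrementalInnerSolve( BI, RI, Ht, d ):
    "Exact minimizer of the linearized inner 3DVAR problem (sketch)"
    # Setting the gradient to zero gives (BI + Ht' RI Ht) dx = Ht' RI d
    return numpy.linalg.solve( BI + Ht.T @ RI @ Ht, Ht.T @ (RI @ numpy.ravel( d )) )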
1933 # ==============================================================================
1934 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1935 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1937 Maximum Likelihood Ensemble Filter
1939 if selfA._parameters["EstimationOf"] == "Parameters":
1940 selfA._parameters["StoreInternalVariables"] = True
1943 H = HO["Direct"].appliedControledFormTo
1945 if selfA._parameters["EstimationOf"] == "State":
1946 M = EM["Direct"].appliedControledFormTo
1948 if CM is not None and "Tangent" in CM and U is not None:
1949 Cm = CM["Tangent"].asMatrix(Xb)
1950 else:
1951 Cm = None
1953 # Observation duration and sizes
1954 if hasattr(Y,"stepnumber"):
1955 duration = Y.stepnumber()
1956 __p = numpy.cumprod(Y.shape())[-1]
1957 else:
1958 duration = 2
1959 __p = numpy.array(Y).size
1961 # Precomputation of the B and R inverses
1962 if selfA._parameters["StoreInternalVariables"] \
1963 or selfA._toStore("CostFunctionJ") \
1964 or selfA._toStore("CostFunctionJb") \
1965 or selfA._toStore("CostFunctionJo") \
1966 or selfA._toStore("CurrentOptimum") \
1967 or selfA._toStore("APosterioriCovariance"):
1968 BI = B.getI()
1969 RI = R.getI()
1972 __m = selfA._parameters["NumberOfMembers"]
1974 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1975 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1976 selfA.StoredVariables["Analysis"].store( Xb )
1977 if selfA._toStore("APosterioriCovariance"):
1978 if hasattr(B,"asfullmatrix"):
1979 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1980 else:
1981 selfA.StoredVariables["APosterioriCovariance"].store( B )
1982 selfA._setInternalState("seed", numpy.random.get_state())
1983 elif selfA._parameters["nextStep"]:
1984 Xn = selfA._getInternalState("Xn")
1986 previousJMinimum = numpy.finfo(float).max
1988 for step in range(duration-1):
1989 numpy.random.set_state(selfA._getInternalState("seed"))
1990 if hasattr(Y,"store"):
1991 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1992 else:
1993 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1996 if hasattr(U,"store") and len(U)>1:
1997 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1998 elif hasattr(U,"store") and len(U)==1:
1999 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2000 else:
2001 Un = numpy.asmatrix(numpy.ravel( U )).T
2005 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2006 Xn = CovarianceInflation( Xn,
2007 selfA._parameters["InflationType"],
2008 selfA._parameters["InflationFactor"],
2009 )
2011 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2012 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2013 argsAsSerie = True,
2014 returnSerieAsArrayMatrix = True )
2015 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2016 if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
2017 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2018 Xn_predicted = Xn_predicted + Cm * Un
2019 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2020 # --- > By principle, M = Id, Q = 0
2021 Xn_predicted = EMX = Xn
2023 #--------------------------
2024 if VariantM == "MLEF13":
2025 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2026 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2027 Ua = numpy.identity(__m)
2028 __j = 0
2029 Deltaw = 1
2030 if not BnotT:
2031 Ta = numpy.identity(__m)
2032 vw = numpy.zeros(__m)
2033 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2034 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2036 if BnotT:
2037 E1 = vx1 + _epsilon * EaX
2038 else:
2039 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2041 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2042 argsAsSerie = True,
2043 returnSerieAsArrayMatrix = True )
2044 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2046 if BnotT:
2047 EaY = (HE2 - vy2) / _epsilon
2048 else:
2049 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2051 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2052 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2053 Deltaw = - numpy.linalg.solve(mH,GradJ)
2054 vw = vw + Deltaw
2057 if not BnotT:
2058 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2060 __j = __j + 1
2062 if BnotT:
2063 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2065 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2066 #--------------------------
2067 else:
2068 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2070 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2071 Xn = CovarianceInflation( Xn,
2072 selfA._parameters["InflationType"],
2073 selfA._parameters["InflationFactor"],
2074 )
2076 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2077 #--------------------------
2078 selfA._setInternalState("Xn", Xn)
2079 selfA._setInternalState("seed", numpy.random.get_state())
2080 #--------------------------
2082 if selfA._parameters["StoreInternalVariables"] \
2083 or selfA._toStore("CostFunctionJ") \
2084 or selfA._toStore("CostFunctionJb") \
2085 or selfA._toStore("CostFunctionJo") \
2086 or selfA._toStore("APosterioriCovariance") \
2087 or selfA._toStore("InnovationAtCurrentAnalysis") \
2088 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2089 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2090 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2091 _Innovation = Ynpu - _HXa
2093 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2094 # ---> with the analysis
2095 selfA.StoredVariables["Analysis"].store( Xa )
2096 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2097 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2098 if selfA._toStore("InnovationAtCurrentAnalysis"):
2099 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2100 # ---> with the current state
2101 if selfA._parameters["StoreInternalVariables"] \
2102 or selfA._toStore("CurrentState"):
2103 selfA.StoredVariables["CurrentState"].store( Xn )
2104 if selfA._toStore("ForecastState"):
2105 selfA.StoredVariables["ForecastState"].store( EMX )
2106 if selfA._toStore("ForecastCovariance"):
2107 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2108 if selfA._toStore("BMA"):
2109 selfA.StoredVariables["BMA"].store( EMX - Xa )
2110 if selfA._toStore("InnovationAtCurrentState"):
2111 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2112 if selfA._toStore("SimulatedObservationAtCurrentState") \
2113 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2114 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2116 if selfA._parameters["StoreInternalVariables"] \
2117 or selfA._toStore("CostFunctionJ") \
2118 or selfA._toStore("CostFunctionJb") \
2119 or selfA._toStore("CostFunctionJo") \
2120 or selfA._toStore("CurrentOptimum") \
2121 or selfA._toStore("APosterioriCovariance"):
2122 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2123 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2124 J  = Jb + Jo
2125 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2126 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2127 selfA.StoredVariables["CostFunctionJ" ].store( J )
2129 if selfA._toStore("IndexOfOptimum") \
2130 or selfA._toStore("CurrentOptimum") \
2131 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2132 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2133 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2134 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2135 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2136 if selfA._toStore("IndexOfOptimum"):
2137 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2138 if selfA._toStore("CurrentOptimum"):
2139 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2140 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2141 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2142 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2143 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2144 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2145 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2146 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2147 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2148 if selfA._toStore("APosterioriCovariance"):
2149 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2150 if selfA._parameters["EstimationOf"] == "Parameters" \
2151 and J < previousJMinimum:
2152 previousJMinimum = J
2153 XaMin = Xa
2154 if selfA._toStore("APosterioriCovariance"):
2155 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2157 # Final additional storage of the optimum for parameter estimation
2158 # ----------------------------------------------------------------------
2159 if selfA._parameters["EstimationOf"] == "Parameters":
2160 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2161 selfA.StoredVariables["Analysis"].store( XaMin )
2162 if selfA._toStore("APosterioriCovariance"):
2163 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2164 if selfA._toStore("BMA"):
2165 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
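# mlef above shares the weight-space Gauss-Newton iteration of ienkf; once it
# converges, the analysis anomalies are obtained by the right transform
# Ta = mH^(-1/2), the symmetric square root of the inverse Hessian, so that
# Xn = vx1 + sqrt(m-1) * EaX @ Ta @ Ua. A minimal sketch of that transform,
# assuming mH is symmetric positive definite (scipy.linalg.sqrtm is used
# exactly as in the code above; hypothetical helper, not the ADAO API):
def _exampleEnsembleTransform( mH ):
    "Symmetric square root of the inverse of mH, as used for Ta (sketch)"
    return numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))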
2169 # ==============================================================================
2170 def mmqr(
2171 func     = None,
2172 x0       = None,
2173 fprime   = None,
2174 bounds   = None,
2175 quantile = 0.5,
2176 maxfun   = 15000,
2177 toler    = 1.e-06,
2178 y        = None,
2179 ):
2180 """
2181 Software implementation of the MMQR algorithm, based on the publication:
2182 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2183 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2184 """
2186 # Retrieval of the initial data and information
2187 # --------------------------------------------------
2188 variables = numpy.ravel( x0 )
2189 mesures = numpy.ravel( y )
2190 increment = sys.float_info[0]
2191 p = variables.size
2192 n = mesures.size
2193 quantile = float(quantile)
2195 # Computation of the MM parameters
2196 # ---------------------------
2197 tn = float(toler) / n
2198 e0 = -tn / math.log(tn)
2199 epsilon = (e0-tn)/(1+math.log(e0))
2201 # Initialization computations
2202 # ------------------------
2203 residus = mesures - numpy.ravel( func( variables ) )
2204 poids = 1./(epsilon+numpy.abs(residus))
2205 veps = 1. - 2. * quantile - residus * poids
2206 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2207 iteration = 0
2209 # Iterative search
2210 # -------------------
2211 while (increment > toler) and (iteration < maxfun) :
2212 iteration += 1
2214 Derivees = numpy.array(fprime(variables))
2215 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it went through YACS pipes
2216 DeriveesT = Derivees.transpose()
2217 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2218 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2219 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2221 variables = variables + step
2222 if bounds is not None:
2223 # Beware: an infinite loop is to be avoided here if an interval is too small
2224 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2225 step = step/2.
2226 variables = variables - step
2227 residus = mesures - numpy.ravel( func(variables) )
2228 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2230 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2231 step = step/2.
2232 variables = variables - step
2233 residus = mesures - numpy.ravel( func(variables) )
2234 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2236 increment = lastsurrogate-surrogate
2237 poids = 1./(epsilon+numpy.abs(residus))
2238 veps = 1. - 2. * quantile - residus * poids
2239 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2243 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2245 return variables, Ecart, [n,p,iteration,increment,0]
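# mmqr above minimizes the quantile regression ("pinball" or "check") loss
# sum_i rho_q(r_i) with rho_q(r) = r*(q - 1[r<0]) through an MM scheme of
# iteratively reweighted least squares; the returned Ecart,
# q*sum(r) - sum(r[r<0]), is exactly that loss. A minimal standalone sketch
# of the criterion it targets (hypothetical helper, illustration only):
def _exampleQuantileCheckLoss( residuals, quantile = 0.5 ):
    "Koenker-Bassett check loss of a residual vector (sketch)"
    residuals = numpy.ravel( residuals )
    return float(numpy.sum( residuals * (quantile - (residuals < 0.)) ))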
2247 # ==============================================================================
2248 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2250 Multi-step and multi-method 3DVAR
2254 if selfA._parameters["EstimationOf"] == "State":
2255 M = EM["Direct"].appliedTo
2257 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2258 Xn = numpy.ravel(Xb).reshape((-1,1))
2259 selfA.StoredVariables["Analysis"].store( Xn )
2260 if selfA._toStore("APosterioriCovariance"):
2261 if hasattr(B,"asfullmatrix"):
2262 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2263 else:
2264 selfA.StoredVariables["APosterioriCovariance"].store( B )
2265 if selfA._toStore("ForecastState"):
2266 selfA.StoredVariables["ForecastState"].store( Xn )
2267 elif selfA._parameters["nextStep"]:
2268 Xn = selfA._getInternalState("Xn")
2269 else:
2270 Xn = numpy.ravel(Xb).reshape((-1,1))
2272 if hasattr(Y,"stepnumber"):
2273 duration = Y.stepnumber()
2274 else:
2275 duration = 2
2278 for step in range(duration-1):
2279 if hasattr(Y,"store"):
2280 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2281 else:
2282 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2284 if selfA._parameters["EstimationOf"] == "State": # Forecast
2285 Xn_predicted = M( Xn )
2286 if selfA._toStore("ForecastState"):
2287 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2288 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2289 # --- > By principle, M = Id, Q = 0
2290 Xn_predicted = Xn
2291 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2293 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2295 Xn = selfA.StoredVariables["Analysis"][-1]
2296 #--------------------------
2297 selfA._setInternalState("Xn", Xn)
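# multi3dvar above is a thin time-stepping driver: it forecasts the previous
# analysis with M when estimating a state, then delegates each assimilation
# cycle to the "oneCycle" argument (one of the 3DVAR variants of this
# module). A minimal sketch of that driver pattern with hypothetical
# forecast/analyse callables (illustration only, not the ADAO API):
def _exampleMultiStepDriver( x0, observations, forecast, analyse ):
    "Chain forecast and analysis steps over successive observations (sketch)"
    x = x0
    for y in observations:
        x = analyse( forecast( x ), y )
    return x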
2301 # ==============================================================================
2302 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2311 Hm = HO["Direct"].appliedTo
2313 # Possible use of a precomputed H(Xb) vector
2314 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2315 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2316 else:
2317 HXb = Hm( Xb )
2318 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2319 if Y.size != HXb.size:
2320 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2321 if max(Y.shape) != max(HXb.shape):
2322 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2324 if selfA._toStore("JacobianMatrixAtBackground"):
2325 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2326 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2327 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2329 Ht = HO["Tangent"].asMatrix(Xb)
2330 BHT = B * Ht.T
2331 HBHTpR = R + Ht * BHT
2332 Innovation = Y - HXb
2334 # Starting point of the optimization
2335 Xini = numpy.zeros(Xb.shape)
2337 # Definition of the cost function
2338 # ------------------------------
2339 def CostFunction(w):
2340 _W = numpy.asmatrix(numpy.ravel( w )).T
2341 if selfA._parameters["StoreInternalVariables"] or \
2342 selfA._toStore("CurrentState") or \
2343 selfA._toStore("CurrentOptimum"):
2344 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2345 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2346 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2347 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2348 if selfA._toStore("InnovationAtCurrentState"):
2349 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2351 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2352 Jo = float( - _W.T * Innovation )
2353 J  = Jb + Jo
2355 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2356 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2357 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2358 selfA.StoredVariables["CostFunctionJ" ].store( J )
2359 if selfA._toStore("IndexOfOptimum") or \
2360 selfA._toStore("CurrentOptimum") or \
2361 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2362 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2363 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2364 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2365 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2366 if selfA._toStore("IndexOfOptimum"):
2367 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2368 if selfA._toStore("CurrentOptimum"):
2369 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2370 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2371 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2372 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2373 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2374 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2375 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2376 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2377 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2378 return J
2380 def GradientOfCostFunction(w):
2381 _W = numpy.asmatrix(numpy.ravel( w )).T
2382 GradJb = HBHTpR * _W
2383 GradJo = - Innovation
2384 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2385 return GradJ
2387 # Minimization of the functional
2388 # --------------------------------
2389 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2391 if selfA._parameters["Minimizer"] == "LBFGSB":
2392 if "0.19" <= scipy.version.version <= "1.1.0":
2393 import lbfgsbhlt as optimiseur
2394 else:
2395 import scipy.optimize as optimiseur
2396 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2397 func = CostFunction,
2399 fprime = GradientOfCostFunction,
2401 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2402 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2403 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2404 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2405 iprint = selfA._parameters["optiprint"],
2406 )
2407 nfeval = Informations['funcalls']
2408 rc = Informations['warnflag']
2409 elif selfA._parameters["Minimizer"] == "TNC":
2410 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2411 func = CostFunction,
2413 fprime = GradientOfCostFunction,
2415 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2416 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2417 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2418 ftol = selfA._parameters["CostDecrementTolerance"],
2419 messages = selfA._parameters["optmessages"],
2420 )
2421 elif selfA._parameters["Minimizer"] == "CG":
2422 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2425 fprime = GradientOfCostFunction,
2427 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2428 gtol = selfA._parameters["GradientNormTolerance"],
2429 disp = selfA._parameters["optdisp"],
2432 elif selfA._parameters["Minimizer"] == "NCG":
2433 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2436 fprime = GradientOfCostFunction,
2438 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2439 avextol = selfA._parameters["CostDecrementTolerance"],
2440 disp = selfA._parameters["optdisp"],
2443 elif selfA._parameters["Minimizer"] == "BFGS":
2444 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2447 fprime = GradientOfCostFunction,
2449 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2450 gtol = selfA._parameters["GradientNormTolerance"],
2451 disp = selfA._parameters["optdisp"],
2454 else:
2455 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2457 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2458 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2460 # Correction to work around a TNC bug on the returned Minimum
2461 # ----------------------------------------------------------------
2462 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2463 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2464 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2465 else:
2466 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2468 # Obtaining the analysis
2469 # ----------------------
2470 Xa = Minimum
2472 selfA.StoredVariables["Analysis"].store( Xa )
2474 if selfA._toStore("OMA") or \
2475 selfA._toStore("SigmaObs2") or \
2476 selfA._toStore("SimulationQuantiles") or \
2477 selfA._toStore("SimulatedObservationAtOptimum"):
2478 if selfA._toStore("SimulatedObservationAtCurrentState"):
2479 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2480 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2481 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2482 else:
2483 HXa = Hm( Xa )
2485 # Computation of the analysis covariance
2486 # ---------------------------------
2487 if selfA._toStore("APosterioriCovariance") or \
2488 selfA._toStore("SimulationQuantiles") or \
2489 selfA._toStore("JacobianMatrixAtOptimum") or \
2490 selfA._toStore("KalmanGainAtOptimum"):
2491 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2492 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2493 if selfA._toStore("APosterioriCovariance") or \
2494 selfA._toStore("SimulationQuantiles") or \
2495 selfA._toStore("KalmanGainAtOptimum"):
2496 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2497 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2498 if selfA._toStore("APosterioriCovariance") or \
2499 selfA._toStore("SimulationQuantiles"):
2500 BI = B.getI()
2501 RI = R.getI()
2502 HessienneI = []
2503 nb = Xa.size
2504 for i in range(nb):
2505 _ee = numpy.matrix(numpy.zeros(nb)).T
2506 _ee[i] = 1.
2507 _HtEE = numpy.dot(HtM,_ee)
2508 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2509 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2510 HessienneI = numpy.matrix( HessienneI )
2511 A = HessienneI.I
2512 if min(A.shape) != max(A.shape):
2513 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2514 if (numpy.diag(A) < 0).any():
2515 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2516 if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
2517 try:
2518 L = numpy.linalg.cholesky( A )
2519 except:
2520 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2521 if selfA._toStore("APosterioriCovariance"):
2522 selfA.StoredVariables["APosterioriCovariance"].store( A )
2523 if selfA._toStore("JacobianMatrixAtOptimum"):
2524 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2525 if selfA._toStore("KalmanGainAtOptimum"):
2526 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2527 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2528 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2530 # Additional calculations and/or storages
2531 # ---------------------------------------
2532 if selfA._toStore("Innovation") or \
2533 selfA._toStore("SigmaObs2") or \
2534 selfA._toStore("MahalanobisConsistency") or \
2535 selfA._toStore("OMB"):
2536 d = Y - HXb
2537 if selfA._toStore("Innovation"):
2538 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2539 if selfA._toStore("BMA"):
2540 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2541 if selfA._toStore("OMA"):
2542 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2543 if selfA._toStore("OMB"):
2544 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2545 if selfA._toStore("SigmaObs2"):
2546 TraceR = R.trace(Y.size)
2547 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2548 if selfA._toStore("MahalanobisConsistency"):
2549 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2550 if selfA._toStore("SimulationQuantiles"):
2551 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2552 if selfA._toStore("SimulatedObservationAtBackground"):
2553 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2554 if selfA._toStore("SimulatedObservationAtOptimum"):
2555 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
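# psas3dvar above works in observation space: it minimizes
# J(w) = 0.5*w'*(H B H' + R)*w - w'*d, whose exact minimizer solves
# (H B H' + R) w = d, the physical analysis being Xa = Xb + B H' w.
# A minimal dense sketch of that direct solve, assuming plain numpy arrays
# (hypothetical helper, not the iterative ADAO minimization above):
def _examplePSASDirectSolve( Xb, BHT, HBHTpR, d ):
    "Exact observation-space PSAS analysis (sketch)"
    w = numpy.linalg.solve( HBHTpR, numpy.ravel( d ) )
    return numpy.ravel( Xb ) + BHT @ w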
2559 # ==============================================================================
2560 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2564 if selfA._parameters["EstimationOf"] == "Parameters":
2565 selfA._parameters["StoreInternalVariables"] = True
2568 H = HO["Direct"].appliedControledFormTo
2570 if selfA._parameters["EstimationOf"] == "State":
2571 M = EM["Direct"].appliedControledFormTo
2573 if CM is not None and "Tangent" in CM and U is not None:
2574 Cm = CM["Tangent"].asMatrix(Xb)
2575 else:
2576 Cm = None
2578 # Observation duration and sizes
2579 if hasattr(Y,"stepnumber"):
2580 duration = Y.stepnumber()
2581 __p = numpy.cumprod(Y.shape())[-1]
2582 else:
2583 duration = 2
2584 __p = numpy.array(Y).size
2586 # Precomputation of the B and R inverses
2587 if selfA._parameters["StoreInternalVariables"] \
2588 or selfA._toStore("CostFunctionJ") \
2589 or selfA._toStore("CostFunctionJb") \
2590 or selfA._toStore("CostFunctionJo") \
2591 or selfA._toStore("CurrentOptimum") \
2592 or selfA._toStore("APosterioriCovariance"):
2593 BI = B.getI()
2594 RI = R.getI()
2597 __m = selfA._parameters["NumberOfMembers"]
2599 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2600 else:                         Rn = R
2602 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2603 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2604 selfA.StoredVariables["Analysis"].store( Xb )
2605 if selfA._toStore("APosterioriCovariance"):
2606 if hasattr(B,"asfullmatrix"):
2607 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2608 else:
2609 selfA.StoredVariables["APosterioriCovariance"].store( B )
2610 selfA._setInternalState("seed", numpy.random.get_state())
2611 elif selfA._parameters["nextStep"]:
2612 Xn = selfA._getInternalState("Xn")
2614 previousJMinimum = numpy.finfo(float).max
2616 for step in range(duration-1):
2617 numpy.random.set_state(selfA._getInternalState("seed"))
2618 if hasattr(Y,"store"):
2619 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2620 else:
2621 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2624 if hasattr(U,"store") and len(U)>1:
2625 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2626 elif hasattr(U,"store") and len(U)==1:
2627 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2628 else:
2629 Un = numpy.asmatrix(numpy.ravel( U )).T
2633 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2634 Xn = CovarianceInflation( Xn,
2635 selfA._parameters["InflationType"],
2636 selfA._parameters["InflationFactor"],
2637 )
2639 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2640 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2641 argsAsSerie = True,
2642 returnSerieAsArrayMatrix = True )
2643 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2644 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2645 argsAsSerie = True,
2646 returnSerieAsArrayMatrix = True )
2647 if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
2648 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2649 Xn_predicted = Xn_predicted + Cm * Un
2650 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2651 # --- > By principle, M = Id, Q = 0
2652 Xn_predicted = EMX = Xn
2653 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2654 argsAsSerie = True,
2655 returnSerieAsArrayMatrix = True )
2657 # Mean of forecast and observation of forecast
2658 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2659 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2661 #--------------------------
2662 if VariantM == "KalmanFilterFormula05":
2663 PfHT, HPfHT = 0., 0.
2664 for i in range(__m):
2665 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2666 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2667 PfHT += Exfi * Eyfi.T
2668 HPfHT += Eyfi * Eyfi.T
2669 PfHT = (1./(__m-1)) * PfHT
2670 HPfHT = (1./(__m-1)) * HPfHT
2671 Kn = PfHT * ( R + HPfHT ).I
2674 for i in range(__m):
2675 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2676 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2677 #--------------------------
2678 elif VariantM == "KalmanFilterFormula16":
2679 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2680 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2682 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2683 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2685 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2687 for i in range(__m):
2688 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2689 #--------------------------
2690 else:
2691 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2693 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2694 Xn = CovarianceInflation( Xn,
2695 selfA._parameters["InflationType"],
2696 selfA._parameters["InflationFactor"],
2697 )
2699 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2700 #--------------------------
2701 selfA._setInternalState("Xn", Xn)
2702 selfA._setInternalState("seed", numpy.random.get_state())
2703 #--------------------------
2705 if selfA._parameters["StoreInternalVariables"] \
2706 or selfA._toStore("CostFunctionJ") \
2707 or selfA._toStore("CostFunctionJb") \
2708 or selfA._toStore("CostFunctionJo") \
2709 or selfA._toStore("APosterioriCovariance") \
2710 or selfA._toStore("InnovationAtCurrentAnalysis") \
2711 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2712 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2713 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2714 _Innovation = Ynpu - _HXa
2716 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2717 # ---> with the analysis
2718 selfA.StoredVariables["Analysis"].store( Xa )
2719 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2720 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2721 if selfA._toStore("InnovationAtCurrentAnalysis"):
2722 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2723 # ---> with the current state
2724 if selfA._parameters["StoreInternalVariables"] \
2725 or selfA._toStore("CurrentState"):
2726 selfA.StoredVariables["CurrentState"].store( Xn )
2727 if selfA._toStore("ForecastState"):
2728 selfA.StoredVariables["ForecastState"].store( EMX )
2729 if selfA._toStore("ForecastCovariance"):
2730 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2731 if selfA._toStore("BMA"):
2732 selfA.StoredVariables["BMA"].store( EMX - Xa )
2733 if selfA._toStore("InnovationAtCurrentState"):
2734 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2735 if selfA._toStore("SimulatedObservationAtCurrentState") \
2736 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2737 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2739 if selfA._parameters["StoreInternalVariables"] \
2740 or selfA._toStore("CostFunctionJ") \
2741 or selfA._toStore("CostFunctionJb") \
2742 or selfA._toStore("CostFunctionJo") \
2743 or selfA._toStore("CurrentOptimum") \
2744 or selfA._toStore("APosterioriCovariance"):
2745 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2746 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2747 J  = Jb + Jo
2748 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2749 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2750 selfA.StoredVariables["CostFunctionJ" ].store( J )
2752 if selfA._toStore("IndexOfOptimum") \
2753 or selfA._toStore("CurrentOptimum") \
2754 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2755 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2756 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2757 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2758 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2759 if selfA._toStore("IndexOfOptimum"):
2760 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2761 if selfA._toStore("CurrentOptimum"):
2762 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2763 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2764 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2765 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2766 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2767 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2768 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2769 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2770 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2771 if selfA._toStore("APosterioriCovariance"):
2772 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2773 if selfA._parameters["EstimationOf"] == "Parameters" \
2774 and J < previousJMinimum:
2775 previousJMinimum = J
2776 XaMin = Xa
2777 if selfA._toStore("APosterioriCovariance"):
2778 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2780 # Final additional storage of the optimum for parameter estimation
2781 # ----------------------------------------------------------------------
2782 if selfA._parameters["EstimationOf"] == "Parameters":
2783 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2784 selfA.StoredVariables["Analysis"].store( XaMin )
2785 if selfA._toStore("APosterioriCovariance"):
2786 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2787 if selfA._toStore("BMA"):
2788 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
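# senkf above is the stochastic ("perturbed observations") EnKF: every member
# receives its own perturbed observation and a gain built from the ensemble
# anomaly covariances, K = EaX EaY' (EaY EaY' + R)^-1. A minimal standalone
# sketch of one analysis step on plain numpy arrays (hypothetical helper,
# not the ADAO implementation; E and HE hold the members as columns):
def _exampleStochasticEnKFStep( E, HE, Y, R ):
    "One perturbed-observations EnKF analysis of the ensemble E (sketch)"
    __n, __m = E.shape
    __p = numpy.ravel( Y ).size
    # Each member gets its own perturbed copy of the observation vector
    EpY = numpy.ravel( Y ).reshape((__p,1)) \
        + numpy.random.multivariate_normal( numpy.zeros(__p), R, size=__m ).T
    EaX = (E - E.mean(axis=1, keepdims=True)) / math.sqrt(__m-1)
    EaY = (HE - HE.mean(axis=1, keepdims=True)) / math.sqrt(__m-1)
    K = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T + R )
    return E + K @ (EpY - HE)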
2792 # ==============================================================================
2793 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2795 Standard Kalman Filter
2797 if selfA._parameters["EstimationOf"] == "Parameters":
2798 selfA._parameters["StoreInternalVariables"] = True
2802 Ht = HO["Tangent"].asMatrix(Xb)
2803 Ha = HO["Adjoint"].asMatrix(Xb)
2805 if selfA._parameters["EstimationOf"] == "State":
2806 Mt = EM["Tangent"].asMatrix(Xb)
2807 Ma = EM["Adjoint"].asMatrix(Xb)
2809 if CM is not None and "Tangent" in CM and U is not None:
2810 Cm = CM["Tangent"].asMatrix(Xb)
2811 else:
2812 Cm = None
2814 # Observation duration and sizes
2815 if hasattr(Y,"stepnumber"):
2816 duration = Y.stepnumber()
2817 __p = numpy.cumprod(Y.shape())[-1]
2818 else:
2819 duration = 2
2820 __p = numpy.array(Y).size
2822 # Precomputation of the B and R inverses
2823 if selfA._parameters["StoreInternalVariables"] \
2824 or selfA._toStore("CostFunctionJ") \
2825 or selfA._toStore("CostFunctionJb") \
2826 or selfA._toStore("CostFunctionJo") \
2827 or selfA._toStore("CurrentOptimum") \
2828 or selfA._toStore("APosterioriCovariance"):
2829 BI = B.getI()
2830 RI = R.getI()
2834 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2835 Xn = Xb
2836 Pn = B
2837 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2838 selfA.StoredVariables["Analysis"].store( Xb )
2839 if selfA._toStore("APosterioriCovariance"):
2840 if hasattr(B,"asfullmatrix"):
2841 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2842 else:
2843 selfA.StoredVariables["APosterioriCovariance"].store( B )
2844 selfA._setInternalState("seed", numpy.random.get_state())
2845 elif selfA._parameters["nextStep"]:
2846 Xn = selfA._getInternalState("Xn")
2847 Pn = selfA._getInternalState("Pn")
2849 if selfA._parameters["EstimationOf"] == "Parameters":
2850 XaMin = Xb
2851 previousJMinimum = numpy.finfo(float).max
2853 for step in range(duration-1):
2854 if hasattr(Y,"store"):
2855 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2856 else:
2857 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2860 if hasattr(U,"store") and len(U)>1:
2861 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2862 elif hasattr(U,"store") and len(U)==1:
2863 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2864 else:
2865 Un = numpy.asmatrix(numpy.ravel( U )).T
2869 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2870 Xn_predicted = Mt * Xn
2871 if Cm is not None and Un is not None: # Beware: if Cm is also inside M, it is applied twice!
2872 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2873 Xn_predicted = Xn_predicted + Cm * Un
2874 Pn_predicted = Q + Mt * Pn * Ma
2875 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2876 # --- > By principle, M = Id, Q = 0
2877 Xn_predicted = Xn
2878 Pn_predicted = Pn
2880 if selfA._parameters["EstimationOf"] == "State":
2881 HX_predicted = Ht * Xn_predicted
2882 _Innovation = Ynpu - HX_predicted
2883 elif selfA._parameters["EstimationOf"] == "Parameters":
2884 HX_predicted = Ht * Xn_predicted
2885 _Innovation = Ynpu - HX_predicted
2886 if Cm is not None and Un is not None: # Beware: if Cm is also inside H, it is applied twice!
2887 _Innovation = _Innovation - Cm * Un
2889 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
2890 Xn = Xn_predicted + Kn * _Innovation
2891 Pn = Pn_predicted - Kn * Ht * Pn_predicted
2892 Xa = Xn
2894 #--------------------------
2895 selfA._setInternalState("Xn", Xn)
2896 selfA._setInternalState("Pn", Pn)
2897 #--------------------------
2899 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2900 # ---> with the analysis
2901 selfA.StoredVariables["Analysis"].store( Xa )
2902 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2903 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
2904 if selfA._toStore("InnovationAtCurrentAnalysis"):
2905 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2906 # ---> with the current state
2907 if selfA._parameters["StoreInternalVariables"] \
2908 or selfA._toStore("CurrentState"):
2909 selfA.StoredVariables["CurrentState"].store( Xn )
2910 if selfA._toStore("ForecastState"):
2911 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2912 if selfA._toStore("ForecastCovariance"):
2913 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
2914 if selfA._toStore("BMA"):
2915 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
2916 if selfA._toStore("InnovationAtCurrentState"):
2917 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2918 if selfA._toStore("SimulatedObservationAtCurrentState") \
2919 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2920 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2922 if selfA._parameters["StoreInternalVariables"] \
2923 or selfA._toStore("CostFunctionJ") \
2924 or selfA._toStore("CostFunctionJb") \
2925 or selfA._toStore("CostFunctionJo") \
2926 or selfA._toStore("CurrentOptimum") \
2927 or selfA._toStore("APosterioriCovariance"):
2928 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2929 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2930 J = Jb + Jo
2931 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2932 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2933 selfA.StoredVariables["CostFunctionJ" ].store( J )
2935 if selfA._toStore("IndexOfOptimum") \
2936 or selfA._toStore("CurrentOptimum") \
2937 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2938 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2939 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2940 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2941 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2942 if selfA._toStore("IndexOfOptimum"):
2943 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2944 if selfA._toStore("CurrentOptimum"):
2945 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2946 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2947 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2948 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2949 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2950 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2951 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2952 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2953 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2954 if selfA._toStore("APosterioriCovariance"):
2955 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2956 if selfA._parameters["EstimationOf"] == "Parameters" \
2957 and J < previousJMinimum:
2958 previousJMinimum = J
2959 XaMin = Xa
2960 if selfA._toStore("APosterioriCovariance"):
2961 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2963 # Additional final storage of the optimum for parameter estimation
2964 # ----------------------------------------------------------------------
2965 if selfA._parameters["EstimationOf"] == "Parameters":
2966 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2967 selfA.StoredVariables["Analysis"].store( XaMin )
2968 if selfA._toStore("APosterioriCovariance"):
2969 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2970 if selfA._toStore("BMA"):
2971 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2975 # ==============================================================================
2976 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2977 """
2978 3DVAR
2979 """
2985 Hm = HO["Direct"].appliedTo
2986 Ha = HO["Adjoint"].appliedInXTo
2988 # Possible use of a precomputed H(Xb) vector
2989 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2990 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2991 else:
2992 HXb = Hm( Xb )
2993 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2994 if Y.size != HXb.size:
2995 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they must be identical."%(Y.size,HXb.size))
2996 if max(Y.shape) != max(HXb.shape):
2997 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they must be identical."%(Y.shape,HXb.shape))
2999 if selfA._toStore("JacobianMatrixAtBackground"):
3000 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3001 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3002 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3004 # Precompute the inversions of B and R
3005 BI = B.getI()
3006 RI = R.getI()
3008 # Starting point of the optimization
3009 Xini = selfA._parameters["InitializationPoint"]
3011 # Definition of the cost function
3012 # ------------------------------
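# The 3DVAR cost function evaluated below is:
#   J(x) = 0.5*(x-Xb).T*BI*(x-Xb) + 0.5*(Y-H(x)).T*RI*(Y-H(x)) = Jb + Jo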
3013 def CostFunction(x):
3014 _X = numpy.asmatrix(numpy.ravel( x )).T
3015 if selfA._parameters["StoreInternalVariables"] or \
3016 selfA._toStore("CurrentState") or \
3017 selfA._toStore("CurrentOptimum"):
3018 selfA.StoredVariables["CurrentState"].store( _X )
3019 _HX = Hm( _X )
3020 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3021 _Innovation = Y - _HX
3022 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3023 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3024 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3025 if selfA._toStore("InnovationAtCurrentState"):
3026 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3028 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3029 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3030 J = Jb + Jo
3032 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3033 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3034 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3035 selfA.StoredVariables["CostFunctionJ" ].store( J )
3036 if selfA._toStore("IndexOfOptimum") or \
3037 selfA._toStore("CurrentOptimum") or \
3038 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3039 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3040 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3041 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3042 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3043 if selfA._toStore("IndexOfOptimum"):
3044 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3045 if selfA._toStore("CurrentOptimum"):
3046 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3047 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3048 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3049 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3050 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3051 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3052 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3053 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3054 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3055 return J
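# The function below returns the exact gradient of J:
#   GradJ(x) = BI*(x-Xb) - Ha( RI*(Y-H(x)) )
# where Ha applies the adjoint (transposed tangent) of H at the point x.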
3057 def GradientOfCostFunction(x):
3058 _X = numpy.asmatrix(numpy.ravel( x )).T
3059 _HX = Hm( _X )
3060 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3061 GradJb = BI * (_X - Xb)
3062 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3063 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3064 return GradJ
3066 # Minimization of the functional
3067 # --------------------------------
3068 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
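# For scipy versions between 0.19 and 1.1.0, the bundled "lbfgsbhlt" module
# (presumably a locally patched copy of scipy's L-BFGS-B wrapper) is imported
# in place of scipy.optimize, to keep a consistent stopping behaviour.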
3070 if selfA._parameters["Minimizer"] == "LBFGSB":
3071 if "0.19" <= scipy.version.version <= "1.1.0":
3072 import lbfgsbhlt as optimiseur
3073 else:
3074 import scipy.optimize as optimiseur
3075 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3076 func = CostFunction,
3077 x0 = Xini,
3078 fprime = GradientOfCostFunction,
3079 args = (),
3080 bounds = selfA._parameters["Bounds"],
3081 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3082 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3083 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3084 iprint = selfA._parameters["optiprint"],
3085 )
3086 nfeval = Informations['funcalls']
3087 rc = Informations['warnflag']
3088 elif selfA._parameters["Minimizer"] == "TNC":
3089 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3090 func = CostFunction,
3091 x0 = Xini,
3092 fprime = GradientOfCostFunction,
3093 args = (),
3094 bounds = selfA._parameters["Bounds"],
3095 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3096 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3097 ftol = selfA._parameters["CostDecrementTolerance"],
3098 messages = selfA._parameters["optmessages"],
3099 )
3100 elif selfA._parameters["Minimizer"] == "CG":
3101 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3102 f = CostFunction,
3103 x0 = Xini,
3104 fprime = GradientOfCostFunction,
3105 args = (),
3106 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3107 gtol = selfA._parameters["GradientNormTolerance"],
3108 disp = selfA._parameters["optdisp"],
3109 full_output = True,
3110 )
3111 elif selfA._parameters["Minimizer"] == "NCG":
3112 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3113 f = CostFunction,
3114 x0 = Xini,
3115 fprime = GradientOfCostFunction,
3116 args = (),
3117 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3118 avextol = selfA._parameters["CostDecrementTolerance"],
3119 disp = selfA._parameters["optdisp"],
3120 full_output = True,
3121 )
3122 elif selfA._parameters["Minimizer"] == "BFGS":
3123 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3124 f = CostFunction,
3125 x0 = Xini,
3126 fprime = GradientOfCostFunction,
3127 args = (),
3128 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3129 gtol = selfA._parameters["GradientNormTolerance"],
3130 disp = selfA._parameters["optdisp"],
3131 full_output = True,
3132 )
3133 else:
3134 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3136 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3137 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3139 # Workaround for a TNC bug on the returned Minimum
3140 # ----------------------------------------------------------------
3141 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3142 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3144 # Retrieval of the analysis
3145 # ----------------------
3146 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3148 selfA.StoredVariables["Analysis"].store( Xa )
3150 if selfA._toStore("OMA") or \
3151 selfA._toStore("SigmaObs2") or \
3152 selfA._toStore("SimulationQuantiles") or \
3153 selfA._toStore("SimulatedObservationAtOptimum"):
3154 if selfA._toStore("SimulatedObservationAtCurrentState"):
3155 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3156 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3157 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3158 else:
3159 HXa = Hm( Xa )
3161 # Computation of the analysis covariance
3162 # ---------------------------------
3163 if selfA._toStore("APosterioriCovariance") or \
3164 selfA._toStore("SimulationQuantiles") or \
3165 selfA._toStore("JacobianMatrixAtOptimum") or \
3166 selfA._toStore("KalmanGainAtOptimum"):
3167 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3168 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3169 if selfA._toStore("APosterioriCovariance") or \
3170 selfA._toStore("SimulationQuantiles") or \
3171 selfA._toStore("KalmanGainAtOptimum"):
3172 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3173 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3174 if selfA._toStore("APosterioriCovariance") or \
3175 selfA._toStore("SimulationQuantiles"):
3176 HessienneI = []
3177 nb = Xa.size
3178 for i in range(nb):
3179 _ee = numpy.matrix(numpy.zeros(nb)).T
3180 _ee[i] = 1.
3181 _HtEE = numpy.dot(HtM,_ee)
3182 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
3183 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3184 HessienneI = numpy.matrix( HessienneI )
3185 A = HessienneI.I
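# A is assembled column by column as the inverse Hessian of J:
#   A = (BI + HaM * RI * HtM)^-1
# which is the classical expression of the 3DVAR a posteriori covariance.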
3186 if min(A.shape) != max(A.shape):
3187 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3188 if (numpy.diag(A) < 0).any():
3189 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3190 if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
3191 try:
3192 L = numpy.linalg.cholesky( A )
3193 except:
3194 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3195 if selfA._toStore("APosterioriCovariance"):
3196 selfA.StoredVariables["APosterioriCovariance"].store( A )
3197 if selfA._toStore("JacobianMatrixAtOptimum"):
3198 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3199 if selfA._toStore("KalmanGainAtOptimum"):
3200 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3201 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
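# Two algebraically equivalent expressions of the gain are available: the
# first (covariance form) inverts a matrix of the observation-space size,
# the second (information form) one of the state-space size, so the cheaper
# form is selected from the relative sizes of Y and Xb.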
3202 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3204 # Additional computations and/or storage
3205 # ---------------------------------------
3206 if selfA._toStore("Innovation") or \
3207 selfA._toStore("SigmaObs2") or \
3208 selfA._toStore("MahalanobisConsistency") or \
3209 selfA._toStore("OMB"):
3210 d = Y - HXb
3211 if selfA._toStore("Innovation"):
3212 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3213 if selfA._toStore("BMA"):
3214 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3215 if selfA._toStore("OMA"):
3216 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3217 if selfA._toStore("OMB"):
3218 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3219 if selfA._toStore("SigmaObs2"):
3220 TraceR = R.trace(Y.size)
3221 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3222 if selfA._toStore("MahalanobisConsistency"):
3223 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3224 if selfA._toStore("SimulationQuantiles"):
3225 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3226 if selfA._toStore("SimulatedObservationAtBackground"):
3227 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3228 if selfA._toStore("SimulatedObservationAtOptimum"):
3229 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3233 # ==============================================================================
3234 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3235 """
3236 4DVAR
3237 """
3243 Hm = HO["Direct"].appliedControledFormTo
3244 Mm = EM["Direct"].appliedControledFormTo
3246 if CM is not None and "Tangent" in CM and U is not None:
3247 Cm = CM["Tangent"].asMatrix(Xb)
3248 else:
3249 Cm = None
3251 def Un(_step):
3252 if U is not None:
3253 if hasattr(U,"store") and 1<=_step<len(U):
3254 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3255 elif hasattr(U,"store") and len(U)==1:
3256 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3257 else:
3258 _Un = numpy.asmatrix(numpy.ravel( U )).T
3259 else:
3260 _Un = None
3261 return _Un
3262 def CmUn(_xn,_un):
3263 if Cm is not None and _un is not None: # Warning: if Cm is also present in M, this would be a duplicate!
3264 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3265 _CmUn = _Cm * _un
3266 else:
3267 _CmUn = 0.
3268 return _CmUn
3270 # Note: the observations are used starting from time step number 1, and
3271 # are stored in Yo according to these indices. Step 0 is therefore not
3272 # used, since the first assimilation stage starts with the observation
3273 # at step 1.
3275 # Number of steps identical to the number of observation steps
3276 if hasattr(Y,"stepnumber"):
3277 duration = Y.stepnumber()
3278 else:
3279 duration = 2
3281 # Precompute the inversions of B and R
3282 BI = B.getI()
3283 RI = R.getI()
3285 # Starting point of the optimization
3286 Xini = selfA._parameters["InitializationPoint"]
3288 # Definition of the cost function
3289 # ------------------------------
3290 selfA.DirectCalculation = [None,] # Step 0 is not observed
3291 selfA.DirectInnovation = [None,] # Step 0 is not observed
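# The evaluation of J runs the model forward over the whole assimilation
# window and stores, step by step, the propagated state and the innovation;
# the gradient then replays these storages backward to apply the adjoints
# without recomputing the forward trajectory.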
3292 def CostFunction(x):
3293 _X = numpy.asmatrix(numpy.ravel( x )).T
3294 if selfA._parameters["StoreInternalVariables"] or \
3295 selfA._toStore("CurrentState") or \
3296 selfA._toStore("CurrentOptimum"):
3297 selfA.StoredVariables["CurrentState"].store( _X )
3298 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3299 selfA.DirectCalculation = [None,]
3300 selfA.DirectInnovation = [None,]
3301 Jo = 0.
3302 _Xn = _X
3303 for step in range(0,duration-1):
3304 if hasattr(Y,"store"):
3305 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3306 else:
3307 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3308 _Un = Un(step)
3311 if selfA._parameters["EstimationOf"] == "State":
3312 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3313 elif selfA._parameters["EstimationOf"] == "Parameters":
3314 pass
3316 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3317 __Bounds = ForceNumericBounds( selfA._parameters["Bounds"] )
3318 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,0])),axis=1)
3319 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,1])),axis=1)
3321 # Difference-to-observations step
3322 if selfA._parameters["EstimationOf"] == "State":
3323 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3324 elif selfA._parameters["EstimationOf"] == "Parameters":
3325 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3327 # Storage of the state
3328 selfA.DirectCalculation.append( _Xn )
3329 selfA.DirectInnovation.append( _YmHMX )
3331 # Accumulation into the observation part of the cost function
3332 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3334 J = Jb + Jo
3335 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3336 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3337 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3338 selfA.StoredVariables["CostFunctionJ" ].store( J )
3339 if selfA._toStore("IndexOfOptimum") or \
3340 selfA._toStore("CurrentOptimum") or \
3341 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3342 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3343 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3344 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3345 if selfA._toStore("IndexOfOptimum"):
3346 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3347 if selfA._toStore("CurrentOptimum"):
3348 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3349 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3350 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3351 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3352 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3353 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3354 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3355 return J
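# The gradient is computed by the adjoint method: the innovations stored
# during the forward sweep are injected backward through the adjoints of
# H and M, so a single reverse pass yields the full gradient of Jo.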
3357 def GradientOfCostFunction(x):
3358 _X = numpy.asmatrix(numpy.ravel( x )).T
3359 GradJb = BI * (_X - Xb)
3360 GradJo = 0.
3361 for step in range(duration-1,0,-1):
3362 # Retrieve the last stored state of the evolution
3363 _Xn = selfA.DirectCalculation.pop()
3364 # Retrieve the last stored innovation
3365 _YmHMX = selfA.DirectInnovation.pop()
3366 # Computation of the adjoints
3367 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3368 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3369 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3370 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3371 # Gradient computation by the adjoint state
3372 GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3373 GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3374 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3375 return GradJ
3377 # Minimization of the functional
3378 # --------------------------------
3379 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3381 if selfA._parameters["Minimizer"] == "LBFGSB":
3382 if "0.19" <= scipy.version.version <= "1.1.0":
3383 import lbfgsbhlt as optimiseur
3384 else:
3385 import scipy.optimize as optimiseur
3386 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3387 func = CostFunction,
3388 x0 = Xini,
3389 fprime = GradientOfCostFunction,
3390 args = (),
3391 bounds = selfA._parameters["Bounds"],
3392 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3393 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3394 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3395 iprint = selfA._parameters["optiprint"],
3396 )
3397 nfeval = Informations['funcalls']
3398 rc = Informations['warnflag']
3399 elif selfA._parameters["Minimizer"] == "TNC":
3400 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3401 func = CostFunction,
3402 x0 = Xini,
3403 fprime = GradientOfCostFunction,
3404 args = (),
3405 bounds = selfA._parameters["Bounds"],
3406 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3407 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3408 ftol = selfA._parameters["CostDecrementTolerance"],
3409 messages = selfA._parameters["optmessages"],
3410 )
3411 elif selfA._parameters["Minimizer"] == "CG":
3412 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3413 f = CostFunction,
3414 x0 = Xini,
3415 fprime = GradientOfCostFunction,
3416 args = (),
3417 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3418 gtol = selfA._parameters["GradientNormTolerance"],
3419 disp = selfA._parameters["optdisp"],
3420 full_output = True,
3421 )
3422 elif selfA._parameters["Minimizer"] == "NCG":
3423 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3424 f = CostFunction,
3425 x0 = Xini,
3426 fprime = GradientOfCostFunction,
3427 args = (),
3428 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3429 avextol = selfA._parameters["CostDecrementTolerance"],
3430 disp = selfA._parameters["optdisp"],
3431 full_output = True,
3432 )
3433 elif selfA._parameters["Minimizer"] == "BFGS":
3434 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3435 f = CostFunction,
3436 x0 = Xini,
3437 fprime = GradientOfCostFunction,
3438 args = (),
3439 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3440 gtol = selfA._parameters["GradientNormTolerance"],
3441 disp = selfA._parameters["optdisp"],
3442 full_output = True,
3443 )
3444 else:
3445 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3447 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3448 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3450 # Workaround for a TNC bug on the returned Minimum
3451 # ----------------------------------------------------------------
3452 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3453 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3455 # Retrieval of the analysis
3456 # ----------------------
3457 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3459 selfA.StoredVariables["Analysis"].store( Xa )
3461 # Additional computations and/or storage
3462 # ---------------------------------------
3463 if selfA._toStore("BMA"):
3464 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3468 # ==============================================================================
3469 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3470 """
3471 Unscented Kalman Filter
3472 """
3473 if selfA._parameters["EstimationOf"] == "Parameters":
3474 selfA._parameters["StoreInternalVariables"] = True
3475 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3477 L = Xb.size
3478 Alpha = selfA._parameters["Alpha"]
3479 Beta = selfA._parameters["Beta"]
3480 if selfA._parameters["Kappa"] == 0:
3481 if selfA._parameters["EstimationOf"] == "State":
3482 Kappa = 0
3483 elif selfA._parameters["EstimationOf"] == "Parameters":
3484 Kappa = 3 - L
3485 else:
3486 Kappa = selfA._parameters["Kappa"]
3487 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3488 Gamma = math.sqrt( L + Lambda )
3490 Ww = []
3491 Ww.append( 0. )
3492 for i in range(2*L):
3493 Ww.append( 1. / (2.*(L + Lambda)) )
3495 Wm = numpy.array( Ww )
3496 Wm[0] = Lambda / (L + Lambda)
3497 Wc = numpy.array( Ww )
3498 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
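# Classical unscented transform weights: the mean weights Wm and the
# covariance weights Wc share the value 1/(2*(L+Lambda)) for the 2*L
# off-center sigma points, and differ only on the central point, where
# Wc additionally carries the (1 - Alpha**2 + Beta) correction term.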
3501 Hm = HO["Direct"].appliedControledFormTo
3503 if selfA._parameters["EstimationOf"] == "State":
3504 Mm = EM["Direct"].appliedControledFormTo
3506 if CM is not None and "Tangent" in CM and U is not None:
3507 Cm = CM["Tangent"].asMatrix(Xb)
3508 else:
3509 Cm = None
3511 # Observation duration and sizes
3512 if hasattr(Y,"stepnumber"):
3513 duration = Y.stepnumber()
3514 __p = numpy.cumprod(Y.shape())[-1]
3515 else:
3516 duration = 2
3517 __p = numpy.array(Y).size
3519 # Precompute the inversions of B and R
3520 if selfA._parameters["StoreInternalVariables"] \
3521 or selfA._toStore("CostFunctionJ") \
3522 or selfA._toStore("CostFunctionJb") \
3523 or selfA._toStore("CostFunctionJo"):
3524 BI = B.getI()
3525 RI = R.getI()
3527 __n = Xb.size
3529 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3530 Xn = Xb
3531 if hasattr(B,"asfullmatrix"):
3532 Pn = B.asfullmatrix(__n)
3533 else:
3534 Pn = B
3535 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3536 selfA.StoredVariables["Analysis"].store( Xb )
3537 if selfA._toStore("APosterioriCovariance"):
3538 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3539 elif selfA._parameters["nextStep"]:
3540 Xn = selfA._getInternalState("Xn")
3541 Pn = selfA._getInternalState("Pn")
3543 if selfA._parameters["EstimationOf"] == "Parameters":
3544 XaMin = Xn
3545 previousJMinimum = numpy.finfo(float).max
3547 for step in range(duration-1):
3548 if hasattr(Y,"store"):
3549 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3550 else:
3551 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3553 if U is not None:
3554 if hasattr(U,"store") and len(U)>1:
3555 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3556 elif hasattr(U,"store") and len(U)==1:
3557 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3558 else:
3559 Un = numpy.asmatrix(numpy.ravel( U )).T
3560 else:
3561 Un = None
3563 Pndemi = numpy.linalg.cholesky(Pn)
3564 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3565 nbSpts = 2*Xn.size+1
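# The 2*L+1 sigma points are the current mean Xn and its shifts by the
# columns of +/- Gamma * sqrt(Pn), the matrix square root being taken
# here as the Cholesky factor of Pn.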
3567 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3568 for point in range(nbSpts):
3569 Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3570 Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3572 XEtnnp = []
3573 for point in range(nbSpts):
3574 if selfA._parameters["EstimationOf"] == "State":
3575 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3576 if Cm is not None and Un is not None: # Warning: if Cm is also present in M, this would be a duplicate!
3577 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3578 XEtnnpi = XEtnnpi + Cm * Un
3579 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3580 XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3581 XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3582 elif selfA._parameters["EstimationOf"] == "Parameters":
3583 # ---> By principle, M = Id and Q = 0
3584 XEtnnpi = Xnp[:,point]
3585 XEtnnp.append( XEtnnpi )
3586 XEtnnp = numpy.hstack( XEtnnp )
3588 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3590 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3591 Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3592 Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3594 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
3595 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3596 for point in range(nbSpts):
3597 Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3599 if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3600 Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3601 else:
3602 Pnmdemi = numpy.linalg.cholesky(Pnm)
3604 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3606 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3607 for point in range(nbSpts):
3608 Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3609 Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3611 Ynnp = []
3612 for point in range(nbSpts):
3613 if selfA._parameters["EstimationOf"] == "State":
3614 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3615 elif selfA._parameters["EstimationOf"] == "Parameters":
3616 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3617 Ynnp.append( Ynnpi )
3618 Ynnp = numpy.hstack( Ynnp )
3620 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3622 Pyyn = R
3623 Pxyn = 0.
3624 for point in range(nbSpts):
3625 Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3626 Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3628 _Innovation = Ynpu - Yncm
3629 if selfA._parameters["EstimationOf"] == "Parameters":
3630 if Cm is not None and Un is not None: # Warning: if Cm is also present in H, this would be a duplicate!
3631 _Innovation = _Innovation - Cm * Un
3633 Kn = Pxyn * Pyyn.I
3634 Xn = Xncm + Kn * _Innovation
3635 Pn = Pnm - Kn * Pyyn * Kn.T
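# Unscented update: the gain Kn = Pxyn * Pyyn^-1 is built from the
# empirical cross covariance Pxyn and innovation covariance Pyyn of the
# sigma points, instead of tangent/adjoint operators.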
3637 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3638 Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
3639 Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
3641 Xa = Xn
3642 #--------------------------
3643 selfA._setInternalState("Xn", Xn)
3644 selfA._setInternalState("Pn", Pn)
3645 #--------------------------
3647 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3648 # ---> with the analysis
3649 selfA.StoredVariables["Analysis"].store( Xa )
3650 # ---> with the current state
3651 if selfA._parameters["StoreInternalVariables"] \
3652 or selfA._toStore("CurrentState"):
3653 selfA.StoredVariables["CurrentState"].store( Xn )
3654 if selfA._toStore("InnovationAtCurrentState"):
3655 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3656 if selfA._parameters["StoreInternalVariables"] \
3657 or selfA._toStore("CostFunctionJ") \
3658 or selfA._toStore("CostFunctionJb") \
3659 or selfA._toStore("CostFunctionJo"):
3660 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3661 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3662 J = Jb + Jo
3663 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3664 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3665 selfA.StoredVariables["CostFunctionJ" ].store( J )
3666 if selfA._toStore("APosterioriCovariance"):
3667 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3668 if selfA._parameters["EstimationOf"] == "Parameters" \
3669 and J < previousJMinimum:
3670 previousJMinimum = J
3671 XaMin = Xa
3672 if selfA._toStore("APosterioriCovariance"):
3673 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3675 # Additional final storage of the optimum for parameter estimation
3676 # ----------------------------------------------------------------------
3677 if selfA._parameters["EstimationOf"] == "Parameters":
3678 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3679 selfA.StoredVariables["Analysis"].store( XaMin )
3680 if selfA._toStore("APosterioriCovariance"):
3681 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3682 if selfA._toStore("BMA"):
3683 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3687 # ==============================================================================
3688 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3689 """
3690 3DVAR variational analysis with no inversion of B
3691 """
3697 Hm = HO["Direct"].appliedTo
3698 Ha = HO["Adjoint"].appliedInXTo
3700 # Precompute the required forms of B and R
3701 BT = B.getT()
3702 RI = R.getI()
3704 # Starting point of the optimization
3705 Xini = numpy.zeros(Xb.shape)
3707 # Definition of the cost function
3708 # ------------------------------
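# Here the optimization runs on the auxiliary control variable v, with
# x = Xb + B*v, so that Jb = 0.5*v.T*B.T*v and no inversion of B is
# needed in the cost function itself.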
3709 def CostFunction(v):
3710 _V = numpy.asmatrix(numpy.ravel( v )).T
3711 _X = Xb + B * _V
3712 if selfA._parameters["StoreInternalVariables"] or \
3713 selfA._toStore("CurrentState") or \
3714 selfA._toStore("CurrentOptimum"):
3715 selfA.StoredVariables["CurrentState"].store( _X )
3716 _HX = Hm( _X )
3717 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3718 _Innovation = Y - _HX
3719 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3720 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3721 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3722 if selfA._toStore("InnovationAtCurrentState"):
3723 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3725 Jb = float( 0.5 * _V.T * BT * _V )
3726 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3728 J = Jb + Jo
3729 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3730 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3731 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3732 selfA.StoredVariables["CostFunctionJ" ].store( J )
3733 if selfA._toStore("IndexOfOptimum") or \
3734 selfA._toStore("CurrentOptimum") or \
3735 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3736 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3737 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3738 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3739 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3740 if selfA._toStore("IndexOfOptimum"):
3741 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3742 if selfA._toStore("CurrentOptimum"):
3743 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3744 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3745 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3746 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3747 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3748 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3749 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3750 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3751 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3752 return J
3754 def GradientOfCostFunction(v):
3755 _V = numpy.asmatrix(numpy.ravel( v )).T
3756 _X = Xb + B * _V
3757 _HX = Hm( _X )
3758 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
3759 GradJb = BT * _V
3760 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3761 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3762 return GradJ
3764 # Minimization of the functional
3765 # --------------------------------
3766 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3768 if selfA._parameters["Minimizer"] == "LBFGSB":
3769 if "0.19" <= scipy.version.version <= "1.1.0":
3770 import lbfgsbhlt as optimiseur
3771 else:
3772 import scipy.optimize as optimiseur
3773 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3774 func = CostFunction,
3775 x0 = Xini,
3776 fprime = GradientOfCostFunction,
3777 args = (),
3778 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
3779 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3780 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3781 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3782 iprint = selfA._parameters["optiprint"],
3783 )
3784 nfeval = Informations['funcalls']
3785 rc = Informations['warnflag']
3786 elif selfA._parameters["Minimizer"] == "TNC":
3787 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3788 func = CostFunction,
3789 x0 = Xini,
3790 fprime = GradientOfCostFunction,
3791 args = (),
3792 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
3793 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3794 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3795 ftol = selfA._parameters["CostDecrementTolerance"],
3796 messages = selfA._parameters["optmessages"],
3797 )
3798 elif selfA._parameters["Minimizer"] == "CG":
3799 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3800 f = CostFunction,
3801 x0 = Xini,
3802 fprime = GradientOfCostFunction,
3803 args = (),
3804 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3805 gtol = selfA._parameters["GradientNormTolerance"],
3806 disp = selfA._parameters["optdisp"],
3807 full_output = True,
3808 )
3809 elif selfA._parameters["Minimizer"] == "NCG":
3810 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3811 f = CostFunction,
3812 x0 = Xini,
3813 fprime = GradientOfCostFunction,
3814 args = (),
3815 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3816 avextol = selfA._parameters["CostDecrementTolerance"],
3817 disp = selfA._parameters["optdisp"],
3818 full_output = True,
3819 )
3820 elif selfA._parameters["Minimizer"] == "BFGS":
3821 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3822 f = CostFunction,
3823 x0 = Xini,
3824 fprime = GradientOfCostFunction,
3825 args = (),
3826 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3827 gtol = selfA._parameters["GradientNormTolerance"],
3828 disp = selfA._parameters["optdisp"],
3829 full_output = True,
3830 )
3831 else:
3832 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3834 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3835 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3837 # Workaround for a TNC bug on the returned Minimum
3838 # ----------------------------------------------------------------
3839 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3840 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3841 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
3842 else:
3843 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
3845 # Retrieval of the analysis
3846 # ----------------------
3848 Xa = Minimum
3849 selfA.StoredVariables["Analysis"].store( Xa )
3851 if selfA._toStore("OMA") or \
3852 selfA._toStore("SigmaObs2") or \
3853 selfA._toStore("SimulationQuantiles") or \
3854 selfA._toStore("SimulatedObservationAtOptimum"):
3855 if selfA._toStore("SimulatedObservationAtCurrentState"):
3856 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3857 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3858 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3859 else:
3860 HXa = Hm( Xa )
3862 # Computation of the analysis covariance
3863 # ---------------------------------
3864 if selfA._toStore("APosterioriCovariance") or \
3865 selfA._toStore("SimulationQuantiles") or \
3866 selfA._toStore("JacobianMatrixAtOptimum") or \
3867 selfA._toStore("KalmanGainAtOptimum"):
3868 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3869 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3870 if selfA._toStore("APosterioriCovariance") or \
3871 selfA._toStore("SimulationQuantiles") or \
3872 selfA._toStore("KalmanGainAtOptimum"):
3873 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3874 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3875 if selfA._toStore("APosterioriCovariance") or \
3876 selfA._toStore("SimulationQuantiles"):
3877 BI = B.getI()
3878 HessienneI = []
3879 nb = Xa.size
3880 for i in range(nb):
3881 _ee = numpy.matrix(numpy.zeros(nb)).T
3882 _ee[i] = 1.
3883 _HtEE = numpy.dot(HtM,_ee)
3884 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
3885 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
3886 HessienneI = numpy.matrix( HessienneI )
3887 A = HessienneI.I
3888 if min(A.shape) != max(A.shape):
3889 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3890 if (numpy.diag(A) < 0).any():
3891 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3892 if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
3893 try:
3894 L = numpy.linalg.cholesky( A )
3895 except:
3896 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3897 if selfA._toStore("APosterioriCovariance"):
3898 selfA.StoredVariables["APosterioriCovariance"].store( A )
3899 if selfA._toStore("JacobianMatrixAtOptimum"):
3900 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3901 if selfA._toStore("KalmanGainAtOptimum"):
3902 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3903 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3904 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3906 # Additional computations and/or storage
3907 # ---------------------------------------
3908 if selfA._toStore("Innovation") or \
3909 selfA._toStore("SigmaObs2") or \
3910 selfA._toStore("MahalanobisConsistency") or \
3911 selfA._toStore("OMB"):
3912 d = Y - HXb
3913 if selfA._toStore("Innovation"):
3914 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
3915 if selfA._toStore("BMA"):
3916 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3917 if selfA._toStore("OMA"):
3918 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3919 if selfA._toStore("OMB"):
3920 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
3921 if selfA._toStore("SigmaObs2"):
3922 TraceR = R.trace(Y.size)
3923 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
3924 if selfA._toStore("MahalanobisConsistency"):
3925 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3926 if selfA._toStore("SimulationQuantiles"):
3927 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3928 if selfA._toStore("SimulatedObservationAtBackground"):
3929 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
3930 if selfA._toStore("SimulatedObservationAtOptimum"):
3931 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
3935 # ==============================================================================
3936 if __name__ == "__main__":
3937 print('\n AUTODIAGNOSTIC\n')