# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
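
# Editor's sketch (kept as a comment, not part of the module API): a job
# triplet for ExecuteFunction bundles a point, optional keyword arguments,
# and a descriptor locating the user function on disk; the values below are
# purely hypothetical and only illustrate the expected layout.
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/user/module/dir",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "userfunction",
#       }
#   HX = ExecuteFunction( (numpy.ones(3), None, funcrepr) )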
51 # ==============================================================================
52 class FDApproximation(object):
54 Cette classe sert d'interface pour définir les opérateurs approximés. A la
55 création d'un objet, en fournissant une fonction "Function", on obtient un
56 objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57 "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58 multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59 "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60 centrées si le booléen "centeredDF" est vrai.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac

    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct calculation using the supplied function.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX

    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian by finite differences,
        that is, the gradient of H at X. Directional finite differences are
        used around the point X. X is a numpy.matrix.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by 2*dX[i]
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by dX[i]
        4/ Each result, component by component, becomes a column of the Jacobian
        """
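        # Editor's note (illustrative, assuming H(x) = x**2 componentwise and
        # the default increment of 1%): at X = (2.,), dX[0] = 0.02, so the
        # centered column is (H(2.02) - H(1.98)) / (2*0.02)
        # = (4.0804 - 3.9204) / 0.04 = 4.0, the exact derivative 2*X.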
        logging.debug("FDA Start of the Jacobian calculation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Jacobian calculation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_dX) )
        #
        logging.debug("FDA End of the Jacobian calculation")
        #
        return _Jacobienne

    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent calculation using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1

    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint calculation using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the value of the adjoint at X applied to Y
            # ----------------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
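
# Editor's sketch (kept as a comment so that importing the module is
# unchanged): typical use of FDApproximation with a hypothetical user
# function F; dX3 and Y3 denote arbitrary conforming vectors.
#
#   def F(x):
#       return numpy.ravel(x)**2
#   FDA = FDApproximation( Function = F, increment = 0.01, centeredDF = True )
#   HX  = FDA.DirectOperator( numpy.ones(3) )              # value H(X)
#   J   = FDA.TangentOperator( (numpy.ones(3), None) )     # full Jacobian
#   JdX = FDA.TangentOperator( (numpy.ones(3), dX3) )      # J applied to dX3
#   JtY = FDA.AdjointOperator( (numpy.ones(3), Y3) )       # J.T applied to Y3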

# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generates an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble

# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generates an ensemble of _nbmembers states around a background, from _nbmembers-1 centered random anomalies"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N centered random anomalies from Zr, following
        the handwritten notes of MB and consistent with the code of PS, with
        eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble

# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
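
# Editor's note: members are stored column-wise, so for instance
#   EnsembleMean( numpy.array([[1., 3.], [2., 4.]]) )
# returns array([[2.], [3.]]), the column vector of per-component means.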

# ==============================================================================
def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if OptMean is None:
        __Em = EnsembleMean( Ensemble )
    else:
        __Em = numpy.ravel(OptMean).reshape((-1,1))
    #
    return Normalisation * (numpy.asarray(Ensemble) - __Em)
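
# Editor's note: with the 2x2 example above, EnsembleOfAnomalies subtracts
# the ensemble mean from each member, giving array([[-1., 1.], [-1., 1.]]);
# the Normalisation factor is typically 1./sqrt(m-1), as in the ensemble
# filters below, when building covariance square roots.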

# ==============================================================================
def EnsembleErrorCovariance( Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov(Ensemble)
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray(Ensemble).shape
        __Anomalies = EnsembleOfAnomalies( Ensemble )
        # Empirical estimation
        __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
        # Enforce symmetry
        __Covariance = (__Covariance + __Covariance.T) * 0.5
        # Enforce positivity
        __epsilon    = mpr*numpy.trace(__Covariance)
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
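
# Editor's note: for an (n, m) ensemble, the robust branch above computes
# C = A A^T / (m-1) from the centered anomalies A, then symmetrizes C and
# adds mpr*trace(C)*I as a guard for positive definiteness; numpy.cov gives
# the same C up to these corrections.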

# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a covariance that is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a covariance that is zero or nearly so
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a covariance that is zero or nearly so
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a covariance that is zero or nearly so
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble

# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
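
# Editor's sketch (hypothetical values): multiplicative inflation on the
# anomalies scales each member's deviation from the ensemble mean, e.g.
#   E  = numpy.array([[1., 3.], [2., 4.]])
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.1 )
# yields mean + 1.1*(E - mean) = array([[0.9, 3.1], [1.9, 4.1]]).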

# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "Estimation of the a posteriori quantiles (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"]  # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        def NoneRemove(paire):
            bmin, bmax = paire
            if bmin is None: bmin = numpy.finfo('float').min
            if bmax is None: bmax = numpy.finfo('float').max
            return [bmin, bmax]
        LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
        HXa = numpy.matrix(numpy.ravel( HXa )).T
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
            dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]-Xa)),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]-Xa)),axis=1)
            dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
            Yr = HXa + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
                Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
            Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
        else:
            raise ValueError("Quantile simulation has to be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
        else:
            YfQ = numpy.hstack((YfQ,Yr))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice]
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
    selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    #
    return 0
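
# Editor's note: the index used above, int(nbsamples*q - 1./nbsamples), is a
# plug-in estimator on the sorted simulated values YfQ; for instance, with
# nbsamples = 100 and q = 0.5 it selects column int(49.99) = 49.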

# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    EnKS
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorization is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EL = EL + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Keep the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last, incompletely updated analyses
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0

# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / math.sqrt(__m-1)
            mS    = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            #
            EaX   = EaX / math.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
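            # Editor's note: the lines above are the ETKF square-root update
            # in weight space (cf. the Hunt 2007 synthesis cited earlier):
            # with S = R^{-1/2} H(E)' / sqrt(m-1) and d = R^{-1/2} (y - Hfm),
            # T = (I + S^T S)^{-1} and w = T S^T d, the analysis ensemble is
            # Xfm + A ( w + sqrt(m-1) T^{1/2} U ), U orthogonal (here I).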
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GardJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GardJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GardJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GardJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GardJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GardJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GardJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GardJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Final additional storage of the optimum for parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.asmatrix(numpy.ravel( M( (Xn, Un) ) )).T
            if Cm is not None and Un is not None: # Caution: if Cm is also in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, None) ) )).T
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.asmatrix(numpy.ravel( H( (Xn_predicted, Un) ) )).T
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
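        # Editor's note: the three lines above are the textbook extended
        # Kalman update with tangent Ht and adjoint Ha = Ht^T:
        #   K   = P^f Ha (R + Ht P^f Ha)^{-1}
        #   x^a = x^f + K (y - H(x^f))
        #   P^a = P^f - K Ht P^f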
        #
        Xa = Xn # Pointer
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum for parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Iterative EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
        else:                         Pn = B
        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.identity(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # --- > By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                vy1 = H((vx2, Un)).reshape((__p,1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
1546 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1547 Xn = CovarianceInflation( Xn,
1548 selfA._parameters["InflationType"],
1549 selfA._parameters["InflationFactor"],
1552 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1553 #--------------------------
1554 selfA._setInternalState("Xn", Xn)
1555 selfA._setInternalState("seed", numpy.random.get_state())
1556 #--------------------------
1558 if selfA._parameters["StoreInternalVariables"] \
1559 or selfA._toStore("CostFunctionJ") \
1560 or selfA._toStore("CostFunctionJb") \
1561 or selfA._toStore("CostFunctionJo") \
1562 or selfA._toStore("APosterioriCovariance") \
1563 or selfA._toStore("InnovationAtCurrentAnalysis") \
1564 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1565 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1566 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1567 _Innovation = Ynpu - _HXa
1569 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1570 # ---> avec analysis
1571 selfA.StoredVariables["Analysis"].store( Xa )
1572 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1573 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1574 if selfA._toStore("InnovationAtCurrentAnalysis"):
1575 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1576 # ---> avec current state
1577 if selfA._parameters["StoreInternalVariables"] \
1578 or selfA._toStore("CurrentState"):
1579 selfA.StoredVariables["CurrentState"].store( Xn )
1580 if selfA._toStore("ForecastState"):
1581 selfA.StoredVariables["ForecastState"].store( E2 )
1582 if selfA._toStore("ForecastCovariance"):
1583 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1584 if selfA._toStore("BMA"):
1585 selfA.StoredVariables["BMA"].store( E2 - Xa )
1586 if selfA._toStore("InnovationAtCurrentState"):
1587 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1588 if selfA._toStore("SimulatedObservationAtCurrentState") \
1589 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1592 if selfA._parameters["StoreInternalVariables"] \
1593 or selfA._toStore("CostFunctionJ") \
1594 or selfA._toStore("CostFunctionJb") \
1595 or selfA._toStore("CostFunctionJo") \
1596 or selfA._toStore("CurrentOptimum") \
1597 or selfA._toStore("APosterioriCovariance"):
1598 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1599 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1601 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1602 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1603 selfA.StoredVariables["CostFunctionJ" ].store( J )
1605 if selfA._toStore("IndexOfOptimum") \
1606 or selfA._toStore("CurrentOptimum") \
1607 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1608 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1609 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1610 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1611 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1612 if selfA._toStore("IndexOfOptimum"):
1613 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1614 if selfA._toStore("CurrentOptimum"):
1615 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1616 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1617 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1618 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1619 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1620 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1621 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1622 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1623 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1624 if selfA._toStore("APosterioriCovariance"):
1625 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1626 if selfA._parameters["EstimationOf"] == "Parameters" \
1627 and J < previousJMinimum:
1628 previousJMinimum = J
1630 if selfA._toStore("APosterioriCovariance"):
1631 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1633 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1634 # ----------------------------------------------------------------------
1635 if selfA._parameters["EstimationOf"] == "Parameters":
1636 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1637 selfA.StoredVariables["Analysis"].store( XaMin )
1638 if selfA._toStore("APosterioriCovariance"):
1639 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1640 if selfA._toStore("BMA"):
1641 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
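
#
# Illustrative sketch, not used by ADAO itself: the ensemble-space Gauss-Newton
# step solved in the "ienkf" (and "mlef") inner loops above. Assuming an
# anomaly matrix EaY of observed ensemble anomalies, an inverse observation
# covariance Ri given as a dense array, and the innovation y - H(x(w)), one
# iteration updates the weight vector w exactly as the GradJ/mH/Deltaw lines
# do. All argument names are local assumptions, not ADAO API.
def _exampleEnsembleGaussNewtonStep( w, EaY, Ri, innovation ):
    "Hypothetical helper: one Gauss-Newton step on the ensemble weights w"
    __m   = w.size
    gradJ = w - numpy.ravel( EaY.T @ (Ri @ innovation) )  # Gradient of the ensemble cost
    hessJ = numpy.identity(__m) + EaY.T @ Ri @ EaY        # Gauss-Newton approximate Hessian
    return w + numpy.linalg.solve(hessJ, -gradJ)          # w + Deltaw
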
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precompute the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    # ----------
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Inner loop: linearize H around the current state
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
        def CostFunction(dx):
            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht * _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        #
        def GradientOfCostFunction(dx):
            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX         = Ht * _dX
            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb       = BI * _dX
            GradJo       = - Ht.T @ (RI * _dInnovation)
            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the functional
        # ------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
        J      = selfA.StoredVariables["CostFunctionJ" ][-1] # Refresh J so that the outer-loop decrement test is meaningful
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Xr
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
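
#
# Illustrative sketch, not used by ADAO itself: the quadratic inner-loop cost
# that "incr3dvar" minimizes once H has been linearized into Ht around the
# current outer-loop state. Arguments are dense arrays and are assumptions,
# not ADAO API: d is the fixed innovation Y - H(Xb).
def _exampleIncrementalCost( dx, Ht, BI, RI, d ):
    "Hypothetical helper: J(dx) = 0.5 dx'B^-1 dx + 0.5 (d - Ht dx)'R^-1 (d - Ht dx)"
    _dX = numpy.ravel( dx ).reshape((-1,1))
    _dd = d - Ht @ _dX                        # Linearized innovation residual
    Jb  = 0.5 * float( _dX.T @ (BI @ _dX) )   # Background term
    Jo  = 0.5 * float( _dd.T @ (RI @ _dd) )   # Observation term
    return Jb + Jo
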
# ==============================================================================
def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Maximum Likelihood Ensemble Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Baseline for the optimum bookkeeping below
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
        #
        #--------------------------
        if VariantM == "MLEF13":
            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
            Ua  = numpy.identity(__m)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta  = numpy.identity(__m)
            vw  = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
            #
            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with the analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with the current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
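
#
# Illustrative sketch, not used by ADAO itself: the square-root transform used
# above to recenter the ensemble once the weights have converged, namely
# Ta = (I_m + EaY' R^-1 EaY)^(-1/2), computed with scipy.linalg.sqrtm exactly
# as in the "mlef" and "ienkf" loops. Dense-array inputs are assumptions.
def _exampleEnsembleTransform( EaY, Ri ):
    "Hypothetical helper: symmetric square-root ensemble transform Ta"
    import scipy.linalg  # Make sure the submodule is loaded
    __m = EaY.shape[1]
    mH  = numpy.identity(__m) + EaY.T @ Ri @ EaY
    return numpy.real( scipy.linalg.sqrtm( numpy.linalg.inv( mH ) ) )
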
# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    # Retrieval of the initial data and information
    # ---------------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus  = mesures - numpy.ravel( func( variables ) )
    poids    = 1./(epsilon+numpy.abs(residus))
    veps     = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
        DeriveesT = Derivees.transpose()
        M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Warning: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
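
#
# Illustrative usage sketch, not executed by ADAO itself: median regression
# (quantile 0.5) of a linear model with "mmqr". The toy data, the model and
# its Jacobian below are assumptions made only for this example.
def _exampleMmqrUsage():
    "Hypothetical example: fit y = a*t + b in the median sense"
    t = numpy.linspace(0., 1., 50)
    y = 2. * t + 1. + 0.1 * numpy.sin(20. * t)  # Pseudo-noisy measurements
    model    = lambda ab: ab[0] * t + ab[1]     # Model evaluated at parameters ab
    jacobian = lambda ab: numpy.vstack((t, numpy.ones_like(t))).T  # Its (n,p) derivative
    variables, Ecart, infos = mmqr(
        func     = model,
        x0       = numpy.zeros(2),
        fprime   = jacobian,
        quantile = 0.5,
        y        = y,
        )
    return variables  # Expected to be close to [2., 1.]
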
# ==============================================================================
def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Multi-step and multi-method 3DVAR
    """
    #
    # Initialization
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedTo
        #
        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
            Xn = numpy.ravel(Xb).reshape((-1,1))
            selfA.StoredVariables["Analysis"].store( Xn )
            if selfA._toStore("APosterioriCovariance"):
                if hasattr(B,"asfullmatrix"):
                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
                else:
                    selfA.StoredVariables["APosterioriCovariance"].store( B )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn )
        elif selfA._parameters["nextStep"]:
            Xn = selfA._getInternalState("Xn")
    else:
        Xn = numpy.ravel(Xb).reshape((-1,1))
    #
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-step
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((-1,1))
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast
            Xn_predicted = M( Xn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
        #
        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
        #
        Xn = selfA.StoredVariables["Analysis"][-1]
        #--------------------------
        selfA._setInternalState("Xn", Xn)
    #
    return 0
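
#
# Illustrative sketch, not used by ADAO itself: the cycling pattern that
# "multi3dvar" implements, with a hypothetical one-step assimilation "cycle"
# and a hypothetical model operator "M" (both are assumptions for the example).
def _exampleCyclingPattern( x, observations, M, cycle ):
    "Hypothetical helper: propagate then assimilate each new observation"
    for y in observations:
        x = M( x )         # Forecast: propagate the previous analysis
        x = cycle( x, y )  # Analysis: assimilate the new observation
    return x
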
# ==============================================================================
def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR PSAS
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    #
    # Possible use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    Ht = HO["Tangent"].asMatrix(Xb)
    BHT = B * Ht.T
    HBHTpR = R + Ht * BHT
    Innovation = Y - HXb
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
        #
        Jb  = float( 0.5 * _W.T * HBHTpR * _W )
        Jo  = float( - _W.T * Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        GradJb = HBHTpR * _W
        GradJo = - Innovation
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        RI = R.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
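
#
# Illustrative sketch, not used by ADAO itself: the dual, observation-space
# cost that "psas3dvar" minimizes, J(w) = 0.5 w'(H B H' + R) w - w'd, the
# state being recovered afterwards as x = Xb + B H' w. Arguments are dense
# arrays and are assumptions, not ADAO API.
def _examplePsasCostAndGradient( w, HBHTpR, d ):
    "Hypothetical helper: PSAS dual cost and gradient for the innovation d"
    _W    = numpy.ravel( w ).reshape((-1,1))
    J     = 0.5 * float( _W.T @ (HBHTpR @ _W) ) - float( _W.T @ d )
    gradJ = numpy.ravel( HBHTpR @ _W ) - numpy.ravel( d )
    return J, gradJ
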
# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
    """
    Stochastic EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Baseline for the optimum bookkeeping below
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula05":
            PfHT, HPfHT = 0., 0.
            for i in range(__m):
                Exfi   = Xn_predicted[:,i].reshape((__n,1)) - Xfm
                Eyfi   = HX_predicted[:,i].reshape((__p,1)) - Hfm
                PfHT  += Exfi * Eyfi.T
                HPfHT += Eyfi * Eyfi.T
            PfHT  = (1./(__m-1)) * PfHT
            HPfHT = (1./(__m-1)) * HPfHT
            Kn = PfHT * ( R + HPfHT ).I
            #
            for i in range(__m):
                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
        #--------------------------
        elif VariantM == "KalmanFilterFormula16":
            EpY  = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
            EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
            #
            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T )
            #
            for i in range(__m):
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with the analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with the current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
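
#
# Illustrative sketch, not used by ADAO itself: the anomaly-based Kalman gain
# K = Pf H' (H Pf H' + R)^-1 that both "senkf" variants above estimate from
# the ensemble, written for dense arrays. Ex are state anomalies, Ey are
# observed anomalies; all names are assumptions, not ADAO API.
def _exampleEnsembleKalmanGain( Ex, Ey, R ):
    "Hypothetical helper: Kalman gain from ensemble anomaly matrices"
    __m   = Ex.shape[1]
    PfHT  = Ex @ Ey.T / (__m - 1)   # Empirical cross covariance Pf H'
    HPfHT = Ey @ Ey.T / (__m - 1)   # Empirical projected covariance H Pf H'
    return PfHT @ numpy.linalg.inv( HPfHT + R )
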
# ==============================================================================
def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Standard Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    Ht = HO["Tangent"].asMatrix(Xb)
    Ha = HO["Adjoint"].asMatrix(Xb)
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mt = EM["Tangent"].asMatrix(Xb)
        Ma = EM["Adjoint"].asMatrix(Xb)
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # Baseline for the optimum bookkeeping below
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = Mt * Xn
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = Ht * Xn_predicted
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = Ht * Xn_predicted
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        Xa = Xn # Pointer copy of the analysis
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with the analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with the current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Possible use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precompute the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
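    # In the usual notations (xb: background, B: background covariance,
    # y: observation, R: observation covariance, H: observation operator),
    # the 3DVAR cost function minimized below and its gradient are:
    #   J(x)     = 1/2 (x-xb)^T B^(-1) (x-xb) + 1/2 (y-H(x))^T R^(-1) (y-H(x))
    #   GradJ(x) = B^(-1) (x-xb) - H'(x)^T R^(-1) (y-H(x))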
    def CostFunction(x):
        _X  = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X      = numpy.asmatrix(numpy.ravel( x )).T
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BI * (_X - Xb)
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
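        # Note: for the scipy versions tested just above, a locally provided
        # variant of the L-BFGS-B wrapper (lbfgsbhlt) is substituted for the
        # standard scipy.optimize one; outside that range, plain scipy is used.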
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to get around a TNC bug on the returned Minimum
    # ----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
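        # The loop above assembles, column by column, the Hessian of the cost
        # function, B^(-1) + H^T R^(-1) H, by applying it to each unit vector;
        # its inverse A, computed just below, is the a posteriori covariance.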
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
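#
# A minimal, self-contained sketch (not ADAO code) of the J = Jb + Jo
# decomposition minimized by std3dvar above, assuming a linear observation
# operator H and identity covariances; kept as a comment so that importing
# this module has no side effect:
#
#     import numpy, scipy.optimize
#     xb = numpy.zeros(2) ; y = numpy.array([1., 2.])
#     H = numpy.eye(2) ; BI = RI = numpy.eye(2)
#     def J(x):
#         return 0.5*(x-xb) @ BI @ (x-xb) + 0.5*(y-H@x) @ RI @ (y-H@x)
#     xa = scipy.optimize.minimize(J, xb).x   # analysis, close to [0.5, 1.]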
# ==============================================================================
def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    4DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    def Un(_step):
        if U is not None:
            if hasattr(U,"store") and 1 <= _step < len(U):
                _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
            elif hasattr(U,"store") and len(U)==1:
                _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                _Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            _Un = None
        return _Un
    def CmUn(_xn,_un):
        if Cm is not None and _un is not None: # Caution: if Cm is also embedded in M, it is counted twice!
            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
            _CmUn = _Cm * _un
        else:
            _CmUn = 0.
        return _CmUn
    #
    # Note: the observations are used starting from time step number 1, and
    # are stored in Yo as indexed by these steps. Step 0 is therefore not
    # used, since the first stage starts with the observation at step 1.
    #
    # Number of steps identical to the number of observation steps
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Precompute the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    selfA.DirectCalculation = [None,] # Step 0 is not observed
    selfA.DirectInnovation  = [None,] # Step 0 is not observed
    def CostFunction(x):
        _X  = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        selfA.DirectCalculation = [None,]
        selfA.DirectInnovation  = [None,]
        Jo  = 0.
        _Xn = _X
        for step in range(0,duration-1):
            if hasattr(Y,"store"):
                _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
            else:
                _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
            _Un = Un(step)
            #
            # Evolution step
            if selfA._parameters["EstimationOf"] == "State":
                _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
            elif selfA._parameters["EstimationOf"] == "Parameters":
                pass
            #
            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
            #
            # Difference with respect to the observations
            if selfA._parameters["EstimationOf"] == "State":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
            elif selfA._parameters["EstimationOf"] == "Parameters":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
            #
            # Storage of the state
            selfA.DirectCalculation.append( _Xn )
            selfA.DirectInnovation.append( _YmHMX )
            #
            # Contribution to the observation functional
            Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
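        # Over the assimilation window, the 4DVAR cost function accumulated
        # above reads:
        #   J(x0) = 1/2 (x0-xb)^T B^(-1) (x0-xb)
        #         + 1/2 sum_k (yk - H(xk))^T R^(-1) (yk - H(xk))
        # where each xk is obtained from x0 through the evolution model M.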
        J = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X      = numpy.asmatrix(numpy.ravel( x )).T
        GradJb  = BI * (_X - Xb)
        GradJo  = 0.
        for step in range(duration-1,0,-1):
            # Retrieve the last stored state of the evolution
            _Xn = selfA.DirectCalculation.pop()
            # Retrieve the last stored innovation
            _YmHMX = selfA.DirectInnovation.pop()
            # Compute the adjoint operators
            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
            # Compute the gradient by the adjoint state method
            GradJo = GradJo + Ha * RI * _YmHMX # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
            GradJo = Ma * GradJo               # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
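        # The backward loop is the adjoint sweep: each innovation is mapped
        # back to the initial time through H^T then M^T, so that GradJo holds
        # sum_k M^T ... M^T H^T R^(-1) (yk - H(xk)); the minus sign in GradJ
        # comes from differentiating (yk - H(xk)) with respect to x0.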
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to get around a TNC bug on the returned Minimum
    # ----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    L     = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
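    # Standard scaled unscented transform weights, with L the state size:
    #   Lambda = Alpha^2 (L + Kappa) - L,  Gamma = sqrt(L + Lambda)
    #   Wm[0]  = Lambda/(L+Lambda)
    #   Wc[0]  = Lambda/(L+Lambda) + 1 - Alpha^2 + Beta
    #   Wm[i]  = Wc[i] = 1/(2(L+Lambda)) for i = 1..2L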
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        Pndemi = numpy.linalg.cholesky(Pn)
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
                if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, it is counted twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm * Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                    XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id and Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( XEtnnpi )
        XEtnnp = numpy.hstack( XEtnnp )
        #
        Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        if   selfA._parameters["EstimationOf"] == "State":      Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
        else:
            Pnmdemi = numpy.linalg.cholesky(Pnm)
        #
        Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
            Ynnp.append( Ynnpi )
        Ynnp = numpy.hstack( Ynnp )
        #
        Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
        #
        Pyyn = R
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
            Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
        #
        _Innovation  = Ynpu - Yncm
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Caution: if Cm is also embedded in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
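        # Unscented Kalman analysis update, built from the empirical moments
        # of the sigma points:
        #   K   = Pxy Pyy^(-1)          (gain)
        #   x_a = x_f + K innovation    (analysis state)
        #   P_a = P_f - K Pyy K^T       (analysis covariance)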
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
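#
# A minimal, self-contained sketch (not ADAO code) of the unscented transform
# driving uckf, for a 1D Gaussian pushed through a nonlinearity; kept as a
# comment so that importing this module has no side effect:
#
#     import numpy
#     L, Alpha, Beta, Kappa = 1, 0.5, 2., 0
#     Lmb = Alpha**2*(L+Kappa) - L ; Gamma = numpy.sqrt(L+Lmb)
#     Wm = numpy.full(2*L+1, 1./(2.*(L+Lmb))) ; Wc = Wm.copy()
#     Wm[0] = Lmb/(L+Lmb) ; Wc[0] = Wm[0] + 1. - Alpha**2 + Beta
#     x, P = 1.0, 0.04                      # prior mean and variance
#     S = numpy.array([x, x+Gamma*numpy.sqrt(P), x-Gamma*numpy.sqrt(P)])
#     Y = numpy.sin(S)                      # nonlinear propagation
#     ym  = (Wm*Y).sum()                    # transformed mean, ~0.825
#     Pyy = (Wc*(Y-ym)**2).sum()            # transformed variance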
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precompute H(Xb), needed by the additional storage section at the end
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    #
    # Precompute the transpose of B and the inverse of R
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization, in the control variable v
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
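    # Here the optimization runs on the control variable v, with the change of
    # variable x = Xb + B v. Then:
    #   Jb = 1/2 v^T B^T v  ( = 1/2 (x-Xb)^T B^(-1) (x-Xb) for symmetric B )
    # so that no inversion of B is required during the minimization.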
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BT * _V
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to get around a TNC bug on the returned Minimum
    # ----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')