# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)

# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.ravel( X ).reshape((-1,1))
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
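
# An illustrative sketch (not part of the library) of the "triplet" protocol
# expected by ExecuteFunction; the path, module and function names below are
# hypothetical placeholders:
#
#     funcrepr = {
#         "__userFunction__path" : "/some/user/path", # directory temporarily added to sys.path
#         "__userFunction__modl" : "usermodule",      # module file name, without extension
#         "__userFunction__name" : "userfunction",    # callable defined in that module
#     }
#     HX = ExecuteFunction( (numpy.arange(3.), None, funcrepr) )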
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface for defining approximated operators. By
    creating an object and providing a function "Function", one obtains an
    object that offers the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD approximation is controlled either by the
    multiplicative increment "increment", 1% by default, or by the fixed
    increment "dX", which is then multiplied by "increment" (hence in %), and
    centered FD are performed if the boolean "centeredDF" is true.
    """
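    # A minimal usage sketch (illustrative only; "H" is a hypothetical user
    # function, and the values are arbitrary):
    #
    #     def H( x ): return numpy.asarray( x )**2
    #     FDA = FDApproximation( Function = H, centeredDF = True )
    #     HX  = FDA.DirectOperator( numpy.ones(3) )            # H(X)
    #     J   = FDA.TangentOperator( (numpy.ones(3), None) )   # full Jacobian at X
    #     JtY = FDA.AdjointOperator( (numpy.ones(3), HX) )     # J^T applied to HX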
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        #
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.ravel( dX )
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct calculation using the provided function.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Calculation of the tangent operator as the Jacobian by finite
        differences, that is, the gradient of H at X. Directional finite
        differences are used around the point X, which is a numpy.ndarray.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], composing X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are calculated
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by twice the increment of the component X[i]
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], composing X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is calculated
        2/ The central value HX = H(X) is calculated
        3/ The differences (HX_plus_dXi-HX) are formed and divided by the
           increment of the component X[i]
        4/ Each result, component by component, becomes a column of the Jacobian
        """
        logging.debug("FDA Starting Jacobian calculation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.ravel( X )
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.ravel( self.__dX )
        assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Jacobian calculation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_dX) ) # Increment norm, paired with __listJPCI
        #
        logging.debug("FDA End of Jacobian calculation")
        #
        return _Jacobienne
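    # A worked instance of the centered column formula above (illustrative
    # only): for H(x) = (x0**2, x0*x1) at X = (1, 2) with increment h on each
    # component, column i of the Jacobian is approximated by
    #     ( H(X + h*e_i) - H(X - h*e_i) ) / (2*h)
    # which converges, as h -> 0, to the exact Jacobian
    #     [[ 2*x0, 0 ], [ x1, x0 ]] = [[ 2, 0 ], [ 2, 1 ]] at X = (1, 2).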
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent calculation using the Jacobian.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.ravel( dX )
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX,]
            else:                return _HtX
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint calculation using the Jacobian.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.ravel( Y )
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY,]
            else:                return _HaY
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers centered random states (built from _nbmembers-1 random anomalies)"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB and consistent with the code of PS, with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
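
# Quick illustrative check of the two helpers above (assumed values): for an
# ensemble stored column-wise, the anomalies sum to zero over the members.
#
#     E  = numpy.array([[1., 2., 3.], [4., 5., 6.]]) # n=2 states, m=3 members
#     Xm = EnsembleMean( E )                         # [[2.], [5.]]
#     Ea = EnsembleOfAnomalies( E )                  # E - Xm, each row sums to 0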
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimate
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensure symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensure positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
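
# Illustrative check (not part of the library): up to the symmetrisation and
# the tiny diagonal shift added above, the robust estimate matches numpy.cov
# on the same column-wise ensemble.
#
#     E  = numpy.random.randn(2, 100)
#     P1 = EnsembleErrorCovariance( E, __quick = True )  # plain numpy.cov
#     P2 = EnsembleErrorCovariance( E, __quick = False ) # anomalies formula
#     numpy.allclose( P1, P2 )                           # True in practice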
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handle a zero or near-zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handle a zero or near-zero covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handle a zero or near-zero covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handle a zero or near-zero covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handle a covariance that is a multiple of the identity
        __zero = 0.
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handle a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handle a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handle a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size as background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
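
# Illustrative use of the inflation helper (assumed values, not prescriptive):
# multiplicative inflation on anomalies keeps the ensemble mean unchanged and
# stretches each member away from it by the given factor.
#
#     E  = numpy.array([[1., 2., 3.], [4., 5., 6.]])
#     Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.5 )
#     # EnsembleMean(Ei) equals EnsembleMean(E); anomalies are scaled by 1.5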
# ==============================================================================
def HessienneEstimation(nb, HaM, HtM, BI, RI):
    "Hessian estimation"
    #
    HessienneI = []
    for i in range(int(nb)):
        _ee    = numpy.zeros((nb,1))
        _ee[i] = 1.
        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
    #
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    #
    if min(A.shape) != max(A.shape):
        raise ValueError("The a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(str(A.shape),))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
    if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
        try:
            L = numpy.linalg.cholesky( A )
        except:
            raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
    #
    return A
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Bounds handling
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # State sampling
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                # Project dXr so that _Xa + dXr stays within the bounds
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.asarray( Hm( Xr ) )
        else:
            raise ValueError("Quantile simulations have to be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Quantiles extraction
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0
# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Force the bounds to be numeric values, except if globally None"
    # Keep the default value None when there are no bounds
    if __Bounds is None: return None
    # Convert each individual None bound to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
    return __Bounds
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenter the bounds around 0, except if globally None"
    # Keep the default value None when there are no bounds
    if __Bounds is None: return None
    # Recenter the numeric bounds values (reshape, not transpose, for a 1D center)
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Apply numeric bounds to a point"
    # Keep a default value when there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Has to be an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Has to be an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Has to be a 2 column array of vector length
        raise ValueError("Incorrect bounds number to be applied for this vector")
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
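
# Small illustration of the bounds helpers above (assumed values): None bounds
# become +/- the largest float, then clipping applies component by component.
#
#     Bounds = ForceNumericBounds( [[0., None], [None, 10.]] )
#     ApplyBounds( numpy.array([-1., 20.]), Bounds )  # -> array([ 0., 10.])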
# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Unscented Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    L = Xb.size
    Alpha = selfA._parameters["Alpha"]
    Beta  = selfA._parameters["Beta"]
    if selfA._parameters["Kappa"] == 0:
        if selfA._parameters["EstimationOf"] == "State":
            Kappa = 0
        elif selfA._parameters["EstimationOf"] == "Parameters":
            Kappa = 3 - L
    else:
        Kappa = selfA._parameters["Kappa"]
    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
    Gamma  = math.sqrt( L + Lambda )
    #
    Ww = []
    Ww.append( 0. )
    for i in range(2*L):
        Ww.append( 1. / (2.*(L + Lambda)) )
    #
    Wm = numpy.array( Ww )
    Wm[0] = Lambda / (L + Lambda)
    Wc = numpy.array( Ww )
    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        if hasattr(B,"asfullmatrix"):
            Pn = B.asfullmatrix(__n)
        else:
            Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
        nbSpts = 2*Xn.size+1
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
        #
        XEtnnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is applied twice!
                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
                    XEtnnpi = XEtnnpi + Cm * Un
                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                XEtnnpi = Xnp[:,point]
            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
        #
        Xncm = ( XEtnnp * Wm ).sum(axis=1)
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
        for point in range(nbSpts):
            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm)) # Wc indexed by the sigma point
        #
        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
        else:
            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
        #
        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            for point in range(nbSpts):
                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
        #
        Ynnp = []
        for point in range(nbSpts):
            if selfA._parameters["EstimationOf"] == "State":
                Ynnpi = Hm( (Xnnp[:,point], None) )
            elif selfA._parameters["EstimationOf"] == "Parameters":
                Ynnpi = Hm( (Xnnp[:,point], Un) )
            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
        Ynnp = numpy.concatenate( Ynnp, axis=1 )
        #
        Yncm = ( Ynnp * Wm ).sum(axis=1)
        #
        Pyyn = R  # Initialized with the observation error covariance
        Pxyn = 0.
        for point in range(nbSpts):
            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
        #
        _Innovation = Ynpu - Yncm.reshape((-1,1))
        if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in H, it is applied twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pxyn * Pyyn.I
        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
        Pn = Pnm - Kn * Pyyn * Kn.T
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xncm )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xncm - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
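
# Illustrative check of the sigma-point weights built in c2ukf (assumed small
# case, not part of the library): the mean weights Wm must sum to 1.
#
#     L, Alpha, Beta, Kappa = 3, 1., 2., 0.
#     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L   # = 0 for these values
#     Ww = [0.] + [1. / (2.*(L + Lambda))] * (2*L)
#     Wm = numpy.array( Ww ) ; Wm[0] = Lambda / (L + Lambda)
#     abs( Wm.sum() - 1. ) < 1.e-12                    # True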
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = len(selfA.StoredVariables["Analysis"])
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin            = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * (Pn * Ma)
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation  = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in H, it is applied twice!
                _Innovation = _Innovation - Cm * Un
        #
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    EnKS (Ensemble Kalman Smoother)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct calculation (memorisation is preferred over recalculation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is applied twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # ---> By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For dimension 1
            delta = RIdemi @ ( Ynpu - vZm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Keep the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Store the last analyses, which are incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm )
        #
1485 #--------------------------
1486 if VariantM == "KalmanFilterFormula":
1487 mS = RIdemi * EaHX / math.sqrt(__m-1)
1488 mS = mS.reshape((-1,__m)) # For the 1D case
1489 delta = RIdemi * ( Ynpu - Hfm )
1490 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1491 vw = mT @ mS.T @ delta
1493 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1494 mU = numpy.identity(__m)
1496 EaX = EaX / math.sqrt(__m-1)
1497 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1498 #--------------------------
1499 elif VariantM == "Variational":
1500 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1501 def CostFunction(w):
1502 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1503 _Jo = 0.5 * _A.T @ (RI * _A)
1504 _Jb = 0.5 * (__m-1) * w.T @ w
1507 def GradientOfCostFunction(w):
1508 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1509 _GardJo = - EaHX.T @ (RI * _A)
1510 _GradJb = (__m-1) * w.reshape((__m,1))
1511 _GradJ = _GardJo + _GradJb
1512 return numpy.ravel(_GradJ)
1513 vw = scipy.optimize.fmin_cg(
1515 x0 = numpy.zeros(__m),
1516 fprime = GradientOfCostFunction,
1521 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1522 Htb = (__m-1) * numpy.identity(__m)
1525 Pta = numpy.linalg.inv( Hta )
1526 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1528 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1529 #--------------------------
1530 elif VariantM == "FiniteSize11": # Jauge Boc2011
1531 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1532 def CostFunction(w):
1533 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1534 _Jo = 0.5 * _A.T @ (RI * _A)
1535 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1538 def GradientOfCostFunction(w):
1539 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1540 _GardJo = - EaHX.T @ (RI * _A)
1541 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1542 _GradJ = _GardJo + _GradJb
1543 return numpy.ravel(_GradJ)
1544 vw = scipy.optimize.fmin_cg(
1546 x0 = numpy.zeros(__m),
1547 fprime = GradientOfCostFunction,
1552 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1554 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1555 / (1 + 1/__m + vw.T @ vw)**2
1558 Pta = numpy.linalg.inv( Hta )
1559 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1561 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1562 #--------------------------
1563 elif VariantM == "FiniteSize15": # Jauge Boc2015
1564 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1565 def CostFunction(w):
1566 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1567 _Jo = 0.5 * _A.T * RI * _A
1568 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1571 def GradientOfCostFunction(w):
1572 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1573 _GardJo = - EaHX.T @ (RI * _A)
1574 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1575 _GradJ = _GardJo + _GradJb
1576 return numpy.ravel(_GradJ)
1577 vw = scipy.optimize.fmin_cg(
1579 x0 = numpy.zeros(__m),
1580 fprime = GradientOfCostFunction,
1585 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1587 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1588 / (1 + 1/__m + vw.T @ vw)**2
1591 Pta = numpy.linalg.inv( Hta )
1592 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1594 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1595 #--------------------------
1596 elif VariantM == "FiniteSize16": # Jauge Boc2016
1597 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1598 def CostFunction(w):
1599 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1600 _Jo = 0.5 * _A.T @ (RI * _A)
1601 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1604 def GradientOfCostFunction(w):
1605 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1606 _GardJo = - EaHX.T @ (RI * _A)
1607 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1608 _GradJ = _GardJo + _GradJb
1609 return numpy.ravel(_GradJ)
1610 vw = scipy.optimize.fmin_cg(
1612 x0 = numpy.zeros(__m),
1613 fprime = GradientOfCostFunction,
1618 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1619 Htb = ((__m+1) / (__m-1)) * \
1620 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1621 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1624 Pta = numpy.linalg.inv( Hta )
1625 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1627 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1628 #--------------------------
1629 else:
1630 raise ValueError("VariantM must be chosen from the list of authorized methods.")
1632 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1633 Xn = CovarianceInflation( Xn,
1634 selfA._parameters["InflationType"],
1635 selfA._parameters["InflationFactor"],
1636 )
1638 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1639 #--------------------------
1640 selfA._setInternalState("Xn", Xn)
1641 selfA._setInternalState("seed", numpy.random.get_state())
1642 #--------------------------
1644 if selfA._parameters["StoreInternalVariables"] \
1645 or selfA._toStore("CostFunctionJ") \
1646 or selfA._toStore("CostFunctionJb") \
1647 or selfA._toStore("CostFunctionJo") \
1648 or selfA._toStore("APosterioriCovariance") \
1649 or selfA._toStore("InnovationAtCurrentAnalysis") \
1650 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1651 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1652 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1653 _Innovation = Ynpu - _HXa
1655 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1656 # ---> with analysis
1657 selfA.StoredVariables["Analysis"].store( Xa )
1658 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1659 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1660 if selfA._toStore("InnovationAtCurrentAnalysis"):
1661 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1662 # ---> with current state
1663 if selfA._parameters["StoreInternalVariables"] \
1664 or selfA._toStore("CurrentState"):
1665 selfA.StoredVariables["CurrentState"].store( Xn )
1666 if selfA._toStore("ForecastState"):
1667 selfA.StoredVariables["ForecastState"].store( EMX )
1668 if selfA._toStore("ForecastCovariance"):
1669 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1670 if selfA._toStore("BMA"):
1671 selfA.StoredVariables["BMA"].store( EMX - Xa )
1672 if selfA._toStore("InnovationAtCurrentState"):
1673 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1674 if selfA._toStore("SimulatedObservationAtCurrentState") \
1675 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1676 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1678 if selfA._parameters["StoreInternalVariables"] \
1679 or selfA._toStore("CostFunctionJ") \
1680 or selfA._toStore("CostFunctionJb") \
1681 or selfA._toStore("CostFunctionJo") \
1682 or selfA._toStore("CurrentOptimum") \
1683 or selfA._toStore("APosterioriCovariance"):
1684 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1685 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1686 J = Jb + Jo
1687 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1688 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1689 selfA.StoredVariables["CostFunctionJ" ].store( J )
1691 if selfA._toStore("IndexOfOptimum") \
1692 or selfA._toStore("CurrentOptimum") \
1693 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1694 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1695 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1696 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1697 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1698 if selfA._toStore("IndexOfOptimum"):
1699 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1700 if selfA._toStore("CurrentOptimum"):
1701 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1702 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1703 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1704 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1705 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1706 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1707 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1708 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1709 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1710 if selfA._toStore("APosterioriCovariance"):
1711 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1712 if selfA._parameters["EstimationOf"] == "Parameters" \
1713 and J < previousJMinimum:
1714 previousJMinimum = J
1715 XaMin = Xa
1716 if selfA._toStore("APosterioriCovariance"):
1717 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1718 # ---> For the smoothers
1719 if selfA._toStore("CurrentEnsembleState"):
1720 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1722 # Additional final storage of the optimum in parameter estimation
1723 # ----------------------------------------------------------------------
1724 if selfA._parameters["EstimationOf"] == "Parameters":
1725 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1726 selfA.StoredVariables["Analysis"].store( XaMin )
1727 if selfA._toStore("APosterioriCovariance"):
1728 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1729 if selfA._toStore("BMA"):
1730 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1734 # ==============================================================================
1735 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1736 """
1737 Extended Kalman Filter
1738 """
1739 if selfA._parameters["EstimationOf"] == "Parameters":
1740 selfA._parameters["StoreInternalVariables"] = True
1743 H = HO["Direct"].appliedControledFormTo
1745 if selfA._parameters["EstimationOf"] == "State":
1746 M = EM["Direct"].appliedControledFormTo
1748 if CM is not None and "Tangent" in CM and U is not None:
1749 Cm = CM["Tangent"].asMatrix(Xb)
1753 # Observation duration and sizes
1754 if hasattr(Y,"stepnumber"):
1755 duration = Y.stepnumber()
1756 __p = numpy.cumprod(Y.shape())[-1]
1757 else:
1758 duration = 2
1759 __p = numpy.array(Y).size
1761 # Precompute the inverses of B and R
1762 if selfA._parameters["StoreInternalVariables"] \
1763 or selfA._toStore("CostFunctionJ") \
1764 or selfA._toStore("CostFunctionJb") \
1765 or selfA._toStore("CostFunctionJo") \
1766 or selfA._toStore("CurrentOptimum") \
1767 or selfA._toStore("APosterioriCovariance"):
1768 BI = B.getI()
1769 RI = R.getI()
1771 __n = Xb.size
1773 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1774 Xn = Xb
1775 Pn = B
1776 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1777 selfA.StoredVariables["Analysis"].store( Xb )
1778 if selfA._toStore("APosterioriCovariance"):
1779 if hasattr(B,"asfullmatrix"):
1780 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1781 else:
1782 selfA.StoredVariables["APosterioriCovariance"].store( B )
1783 selfA._setInternalState("seed", numpy.random.get_state())
1784 elif selfA._parameters["nextStep"]:
1785 Xn = selfA._getInternalState("Xn")
1786 Pn = selfA._getInternalState("Pn")
1788 if selfA._parameters["EstimationOf"] == "Parameters":
1789 XaMin = Xn
1790 previousJMinimum = numpy.finfo(float).max
1792 for step in range(duration-1):
1793 if hasattr(Y,"store"):
1794 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1795 else:
1796 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1798 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1799 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1800 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1801 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1803 if selfA._parameters["EstimationOf"] == "State":
1804 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1805 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1806 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1807 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1809 if U is not None:
1810 if hasattr(U,"store") and len(U)>1:
1811 Un = numpy.ravel( U[step] ).reshape((-1,1))
1812 elif hasattr(U,"store") and len(U)==1:
1813 Un = numpy.ravel( U[0] ).reshape((-1,1))
1814 else:
1815 Un = numpy.ravel( U ).reshape((-1,1))
1816 else:
1817 Un = None
1819 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1820 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1821 if Cm is not None and Un is not None: # Warning: if Cm also appears in M, it is applied twice!
1822 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1823 Xn_predicted = Xn_predicted + Cm * Un
1824 Pn_predicted = Q + Mt * (Pn * Ma)
1825 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1826 # --- > By principle, M = Id, Q = 0
1827 Xn_predicted = Xn
1828 Pn_predicted = Pn
1830 if selfA._parameters["EstimationOf"] == "State":
1831 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1832 _Innovation = Ynpu - HX_predicted
1833 elif selfA._parameters["EstimationOf"] == "Parameters":
1834 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1835 _Innovation = Ynpu - HX_predicted
1836 if Cm is not None and Un is not None: # Warning: if Cm also appears in H, it is applied twice!
1837 _Innovation = _Innovation - Cm * Un
1839 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1840 Xn = Xn_predicted + Kn * _Innovation
1841 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1842 Xa = Xn # Pointers
1844 #--------------------------
1845 selfA._setInternalState("Xn", Xn)
1846 selfA._setInternalState("Pn", Pn)
1847 #--------------------------
1849 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1850 # ---> with analysis
1851 selfA.StoredVariables["Analysis"].store( Xa )
1852 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1853 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1854 if selfA._toStore("InnovationAtCurrentAnalysis"):
1855 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1856 # ---> with current state
1857 if selfA._parameters["StoreInternalVariables"] \
1858 or selfA._toStore("CurrentState"):
1859 selfA.StoredVariables["CurrentState"].store( Xn )
1860 if selfA._toStore("ForecastState"):
1861 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1862 if selfA._toStore("ForecastCovariance"):
1863 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1864 if selfA._toStore("BMA"):
1865 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1866 if selfA._toStore("InnovationAtCurrentState"):
1867 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1868 if selfA._toStore("SimulatedObservationAtCurrentState") \
1869 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1870 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1872 if selfA._parameters["StoreInternalVariables"] \
1873 or selfA._toStore("CostFunctionJ") \
1874 or selfA._toStore("CostFunctionJb") \
1875 or selfA._toStore("CostFunctionJo") \
1876 or selfA._toStore("CurrentOptimum") \
1877 or selfA._toStore("APosterioriCovariance"):
1878 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1879 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1880 J = Jb + Jo
1881 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1882 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1883 selfA.StoredVariables["CostFunctionJ" ].store( J )
1885 if selfA._toStore("IndexOfOptimum") \
1886 or selfA._toStore("CurrentOptimum") \
1887 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1888 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1889 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1890 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1891 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1892 if selfA._toStore("IndexOfOptimum"):
1893 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1894 if selfA._toStore("CurrentOptimum"):
1895 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1896 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1897 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1898 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1899 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1900 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1901 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1902 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1903 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1904 if selfA._toStore("APosterioriCovariance"):
1905 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1906 if selfA._parameters["EstimationOf"] == "Parameters" \
1907 and J < previousJMinimum:
1908 previousJMinimum = J
1909 XaMin = Xa
1910 if selfA._toStore("APosterioriCovariance"):
1911 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1913 # Additional final storage of the optimum in parameter estimation
1914 # ----------------------------------------------------------------------
1915 if selfA._parameters["EstimationOf"] == "Parameters":
1916 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1917 selfA.StoredVariables["Analysis"].store( XaMin )
1918 if selfA._toStore("APosterioriCovariance"):
1919 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1920 if selfA._toStore("BMA"):
1921 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1925 # ==============================================================================
1926 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1927 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1928 """
1929 Iterative EnKF
1930 """
1931 if selfA._parameters["EstimationOf"] == "Parameters":
1932 selfA._parameters["StoreInternalVariables"] = True
1935 H = HO["Direct"].appliedControledFormTo
1937 if selfA._parameters["EstimationOf"] == "State":
1938 M = EM["Direct"].appliedControledFormTo
1940 if CM is not None and "Tangent" in CM and U is not None:
1941 Cm = CM["Tangent"].asMatrix(Xb)
1945 # Observation duration and sizes
1946 if hasattr(Y,"stepnumber"):
1947 duration = Y.stepnumber()
1948 __p = numpy.cumprod(Y.shape())[-1]
1949 else:
1950 duration = 2
1951 __p = numpy.array(Y).size
1953 # Precompute the inverses of B and R
1954 if selfA._parameters["StoreInternalVariables"] \
1955 or selfA._toStore("CostFunctionJ") \
1956 or selfA._toStore("CostFunctionJb") \
1957 or selfA._toStore("CostFunctionJo") \
1958 or selfA._toStore("CurrentOptimum") \
1959 or selfA._toStore("APosterioriCovariance"):
1960 BI = B.getI()
1961 RI = R.getI()
1963 __n = Xb.size
1964 __m = selfA._parameters["NumberOfMembers"]
1966 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1967 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1968 else: Pn = B
1969 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1970 selfA.StoredVariables["Analysis"].store( Xb )
1971 if selfA._toStore("APosterioriCovariance"):
1972 if hasattr(B,"asfullmatrix"):
1973 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1974 else:
1975 selfA.StoredVariables["APosterioriCovariance"].store( B )
1976 selfA._setInternalState("seed", numpy.random.get_state())
1977 elif selfA._parameters["nextStep"]:
1978 Xn = selfA._getInternalState("Xn")
1980 previousJMinimum = numpy.finfo(float).max
1982 for step in range(duration-1):
1983 numpy.random.set_state(selfA._getInternalState("seed"))
1984 if hasattr(Y,"store"):
1985 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1986 else:
1987 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1989 if U is not None:
1990 if hasattr(U,"store") and len(U)>1:
1991 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1992 elif hasattr(U,"store") and len(U)==1:
1993 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1994 else:
1995 Un = numpy.asmatrix(numpy.ravel( U )).T
1996 else:
1997 Un = None
1999 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2000 Xn = CovarianceInflation( Xn,
2001 selfA._parameters["InflationType"],
2002 selfA._parameters["InflationFactor"],
2003 )
2005 #--------------------------
2006 if VariantM == "IEnKF12":
2007 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2008 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2012 Ta = numpy.identity(__m)
2013 vw = numpy.zeros(__m)
2014 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2015 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2018 E1 = vx1 + _epsilon * EaX
2020 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2022 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2023 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2025 returnSerieAsArrayMatrix = True )
2026 elif selfA._parameters["EstimationOf"] == "Parameters":
2027 # --- > Par principe, M = Id
2029 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2030 vy1 = H((vx2, Un)).reshape((__p,1))
2032 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2034 returnSerieAsArrayMatrix = True )
2035 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2038 EaY = (HE2 - vy2) / _epsilon
2040 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2042 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2043 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2044 Deltaw = - numpy.linalg.solve(mH,GradJ)
2049 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2053 A2 = EnsembleOfAnomalies( E2 )
2056 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2057 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2060 #--------------------------
2061 else:
2062 raise ValueError("VariantM must be chosen from the list of authorized methods.")
2064 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2065 Xn = CovarianceInflation( Xn,
2066 selfA._parameters["InflationType"],
2067 selfA._parameters["InflationFactor"],
2068 )
2070 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2071 #--------------------------
2072 selfA._setInternalState("Xn", Xn)
2073 selfA._setInternalState("seed", numpy.random.get_state())
2074 #--------------------------
2076 if selfA._parameters["StoreInternalVariables"] \
2077 or selfA._toStore("CostFunctionJ") \
2078 or selfA._toStore("CostFunctionJb") \
2079 or selfA._toStore("CostFunctionJo") \
2080 or selfA._toStore("APosterioriCovariance") \
2081 or selfA._toStore("InnovationAtCurrentAnalysis") \
2082 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2083 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2084 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2085 _Innovation = Ynpu - _HXa
2087 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2088 # ---> with analysis
2089 selfA.StoredVariables["Analysis"].store( Xa )
2090 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2091 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2092 if selfA._toStore("InnovationAtCurrentAnalysis"):
2093 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2094 # ---> with current state
2095 if selfA._parameters["StoreInternalVariables"] \
2096 or selfA._toStore("CurrentState"):
2097 selfA.StoredVariables["CurrentState"].store( Xn )
2098 if selfA._toStore("ForecastState"):
2099 selfA.StoredVariables["ForecastState"].store( E2 )
2100 if selfA._toStore("ForecastCovariance"):
2101 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2102 if selfA._toStore("BMA"):
2103 selfA.StoredVariables["BMA"].store( E2 - Xa )
2104 if selfA._toStore("InnovationAtCurrentState"):
2105 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2106 if selfA._toStore("SimulatedObservationAtCurrentState") \
2107 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2108 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2110 if selfA._parameters["StoreInternalVariables"] \
2111 or selfA._toStore("CostFunctionJ") \
2112 or selfA._toStore("CostFunctionJb") \
2113 or selfA._toStore("CostFunctionJo") \
2114 or selfA._toStore("CurrentOptimum") \
2115 or selfA._toStore("APosterioriCovariance"):
2116 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2117 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2118 J = Jb + Jo
2119 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2120 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2121 selfA.StoredVariables["CostFunctionJ" ].store( J )
2123 if selfA._toStore("IndexOfOptimum") \
2124 or selfA._toStore("CurrentOptimum") \
2125 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2126 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2127 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2128 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2129 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2130 if selfA._toStore("IndexOfOptimum"):
2131 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2132 if selfA._toStore("CurrentOptimum"):
2133 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2134 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2135 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2136 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2137 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2138 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2139 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2140 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2141 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2142 if selfA._toStore("APosterioriCovariance"):
2143 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2144 if selfA._parameters["EstimationOf"] == "Parameters" \
2145 and J < previousJMinimum:
2146 previousJMinimum = J
2147 XaMin = Xa
2148 if selfA._toStore("APosterioriCovariance"):
2149 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2150 # ---> For the smoothers
2151 if selfA._toStore("CurrentEnsembleState"):
2152 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2154 # Additional final storage of the optimum in parameter estimation
2155 # ----------------------------------------------------------------------
2156 if selfA._parameters["EstimationOf"] == "Parameters":
2157 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2158 selfA.StoredVariables["Analysis"].store( XaMin )
2159 if selfA._toStore("APosterioriCovariance"):
2160 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2161 if selfA._toStore("BMA"):
2162 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2166 # ==============================================================================
2167 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2168 """
2169 Incremental 3DVAR
2170 """
2174 Hm = HO["Direct"].appliedTo
2176 BI = B.getI()
2177 RI = R.getI()
2179 Xini = selfA._parameters["InitializationPoint"]
2181 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
2182 Innovation = Y - HXb
2186 iOuter = 0
2187 J = 1./mpr
2188 DeltaJ = 1./mpr
2189 Xr = Xini.reshape((-1,1))
2190 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2194 Ht = HO["Tangent"].asMatrix(Xr)
2195 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2197 # Definition of the cost function
2198 # -------------------------------
2199 def CostFunction(dx):
2200 _dX = numpy.asmatrix(numpy.ravel( dx )).T
2201 if selfA._parameters["StoreInternalVariables"] or \
2202 selfA._toStore("CurrentState") or \
2203 selfA._toStore("CurrentOptimum"):
2204 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2205 _HdX = Ht * _dX
2206 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
2207 _dInnovation = Innovation - _HdX
2208 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2209 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2210 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2211 if selfA._toStore("InnovationAtCurrentState"):
2212 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2214 Jb = float( 0.5 * _dX.T * BI * _dX )
2215 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
2216 J = Jb + Jo
2218 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2219 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2220 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2221 selfA.StoredVariables["CostFunctionJ" ].store( J )
2222 if selfA._toStore("IndexOfOptimum") or \
2223 selfA._toStore("CurrentOptimum") or \
2224 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2225 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2226 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2227 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2228 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2229 if selfA._toStore("IndexOfOptimum"):
2230 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2231 if selfA._toStore("CurrentOptimum"):
2232 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2233 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2234 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2235 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2236 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2237 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2238 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2239 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2240 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2243 def GradientOfCostFunction(dx):
2244 _dX = numpy.asmatrix(numpy.ravel( dx )).T
2245 _HdX = Ht * _dX
2246 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
2247 _dInnovation = Innovation - _HdX
2248 GradJb = BI * _dX
2249 GradJo = - Ht.T @ (RI * _dInnovation)
2250 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2251 return GradJ
2253 # Minimization of the functional
2254 # ------------------------------
2255 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2257 if selfA._parameters["Minimizer"] == "LBFGSB":
2258 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2259 if "0.19" <= scipy.version.version <= "1.1.0":
2260 import lbfgsbhlt as optimiseur
2262 import scipy.optimize as optimiseur
2263 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2264 func = CostFunction,
2265 x0 = numpy.zeros(Xini.size),
2266 fprime = GradientOfCostFunction,
2267 args = (),
2268 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2269 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2270 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2271 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2272 iprint = selfA._parameters["optiprint"],
2273 )
2274 nfeval = Informations['funcalls']
2275 rc = Informations['warnflag']
2276 elif selfA._parameters["Minimizer"] == "TNC":
2277 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2278 func = CostFunction,
2279 x0 = numpy.zeros(Xini.size),
2280 fprime = GradientOfCostFunction,
2281 args = (),
2282 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2283 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2284 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2285 ftol = selfA._parameters["CostDecrementTolerance"],
2286 messages = selfA._parameters["optmessages"],
2287 )
2288 elif selfA._parameters["Minimizer"] == "CG":
2289 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2290 f = CostFunction,
2291 x0 = numpy.zeros(Xini.size),
2292 fprime = GradientOfCostFunction,
2293 args = (),
2294 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2295 gtol = selfA._parameters["GradientNormTolerance"],
2296 disp = selfA._parameters["optdisp"],
2297 full_output = True,
2298 )
2299 elif selfA._parameters["Minimizer"] == "NCG":
2300 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2301 f = CostFunction,
2302 x0 = numpy.zeros(Xini.size),
2303 fprime = GradientOfCostFunction,
2304 args = (),
2305 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2306 avextol = selfA._parameters["CostDecrementTolerance"],
2307 disp = selfA._parameters["optdisp"],
2308 full_output = True,
2309 )
2310 elif selfA._parameters["Minimizer"] == "BFGS":
2311 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2312 f = CostFunction,
2313 x0 = numpy.zeros(Xini.size),
2314 fprime = GradientOfCostFunction,
2315 args = (),
2316 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2317 gtol = selfA._parameters["GradientNormTolerance"],
2318 disp = selfA._parameters["optdisp"],
2319 full_output = True,
2320 )
2321 else:
2322 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2324 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2325 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2327 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2328 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2329 else:
2330 Minimum = Xb + Minimum.reshape((-1,1))
2332 Xr = Minimum
2333 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2334 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2337 #--------------------------
2339 selfA.StoredVariables["Analysis"].store( Xa )
2341 if selfA._toStore("OMA") or \
2342 selfA._toStore("SigmaObs2") or \
2343 selfA._toStore("SimulationQuantiles") or \
2344 selfA._toStore("SimulatedObservationAtOptimum"):
2345 if selfA._toStore("SimulatedObservationAtCurrentState"):
2346 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2347 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2348 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2352 if selfA._toStore("APosterioriCovariance") or \
2353 selfA._toStore("SimulationQuantiles") or \
2354 selfA._toStore("JacobianMatrixAtOptimum") or \
2355 selfA._toStore("KalmanGainAtOptimum"):
2356 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2357 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2358 if selfA._toStore("APosterioriCovariance") or \
2359 selfA._toStore("SimulationQuantiles") or \
2360 selfA._toStore("KalmanGainAtOptimum"):
2361 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2362 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2363 if selfA._toStore("APosterioriCovariance") or \
2364 selfA._toStore("SimulationQuantiles"):
2365 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2366 if selfA._toStore("APosterioriCovariance"):
2367 selfA.StoredVariables["APosterioriCovariance"].store( A )
2368 if selfA._toStore("JacobianMatrixAtOptimum"):
2369 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2370 if selfA._toStore("KalmanGainAtOptimum"):
2371 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2372 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2373 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2375 # Additional calculations and/or storage
2376 # ---------------------------------------
2377 if selfA._toStore("Innovation") or \
2378 selfA._toStore("SigmaObs2") or \
2379 selfA._toStore("MahalanobisConsistency") or \
2380 selfA._toStore("OMB"):
2382 if selfA._toStore("Innovation"):
2383 selfA.StoredVariables["Innovation"].store( d )
2384 if selfA._toStore("BMA"):
2385 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2386 if selfA._toStore("OMA"):
2387 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2388 if selfA._toStore("OMB"):
2389 selfA.StoredVariables["OMB"].store( d )
2390 if selfA._toStore("SigmaObs2"):
2391 TraceR = R.trace(Y.size)
2392 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2393 if selfA._toStore("MahalanobisConsistency"):
2394 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2395 if selfA._toStore("SimulationQuantiles"):
2396 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2397 if selfA._toStore("SimulatedObservationAtBackground"):
2398 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2399 if selfA._toStore("SimulatedObservationAtOptimum"):
2400 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2404 # ==============================================================================
2405 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2406 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2407 """
2408 Maximum Likelihood Ensemble Filter
2409 """
2410 if selfA._parameters["EstimationOf"] == "Parameters":
2411 selfA._parameters["StoreInternalVariables"] = True
2414 H = HO["Direct"].appliedControledFormTo
2416 if selfA._parameters["EstimationOf"] == "State":
2417 M = EM["Direct"].appliedControledFormTo
2419 if CM is not None and "Tangent" in CM and U is not None:
2420 Cm = CM["Tangent"].asMatrix(Xb)
2424 # Observation duration and sizes
2425 if hasattr(Y,"stepnumber"):
2426 duration = Y.stepnumber()
2427 __p = numpy.cumprod(Y.shape())[-1]
2428 else:
2429 duration = 2
2430 __p = numpy.array(Y).size
2432 # Precompute the inverses of B and R
2433 if selfA._parameters["StoreInternalVariables"] \
2434 or selfA._toStore("CostFunctionJ") \
2435 or selfA._toStore("CostFunctionJb") \
2436 or selfA._toStore("CostFunctionJo") \
2437 or selfA._toStore("CurrentOptimum") \
2438 or selfA._toStore("APosterioriCovariance"):
2439 BI = B.getI()
2440 RI = R.getI()
2442 __n = Xb.size
2443 __m = selfA._parameters["NumberOfMembers"]
2445 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2446 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2447 selfA.StoredVariables["Analysis"].store( Xb )
2448 if selfA._toStore("APosterioriCovariance"):
2449 if hasattr(B,"asfullmatrix"):
2450 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2451 else:
2452 selfA.StoredVariables["APosterioriCovariance"].store( B )
2453 selfA._setInternalState("seed", numpy.random.get_state())
2454 elif selfA._parameters["nextStep"]:
2455 Xn = selfA._getInternalState("Xn")
2457 previousJMinimum = numpy.finfo(float).max
2459 for step in range(duration-1):
2460 numpy.random.set_state(selfA._getInternalState("seed"))
2461 if hasattr(Y,"store"):
2462 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2463 else:
2464 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2466 if U is not None:
2467 if hasattr(U,"store") and len(U)>1:
2468 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2469 elif hasattr(U,"store") and len(U)==1:
2470 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2471 else:
2472 Un = numpy.asmatrix(numpy.ravel( U )).T
2473 else:
2474 Un = None
2476 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2477 Xn = CovarianceInflation( Xn,
2478 selfA._parameters["InflationType"],
2479 selfA._parameters["InflationFactor"],
2480 )
2482 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2483 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2484 argsAsSerie = True,
2485 returnSerieAsArrayMatrix = True )
2486 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2487 if Cm is not None and Un is not None: # Warning: if Cm also appears in M, it is applied twice!
2488 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2489 Xn_predicted = Xn_predicted + Cm * Un
2490 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2491 # --- > By principle, M = Id, Q = 0
2492 Xn_predicted = EMX = Xn
2494 #--------------------------
2495 if VariantM == "MLEF13":
2496 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2497 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2498 Ua = numpy.identity(__m)
2502 Ta = numpy.identity(__m)
2503 vw = numpy.zeros(__m)
2504 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2505 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2508 E1 = vx1 + _epsilon * EaX
2510 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2512 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2514 returnSerieAsArrayMatrix = True )
2515 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2518 EaY = (HE2 - vy2) / _epsilon
2520 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2522 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2523 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2524 Deltaw = - numpy.linalg.solve(mH,GradJ)
2529 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2534 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2536 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2537 #--------------------------
2538 else:
2539 raise ValueError("VariantM must be chosen from the list of authorized methods.")
2541 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2542 Xn = CovarianceInflation( Xn,
2543 selfA._parameters["InflationType"],
2544 selfA._parameters["InflationFactor"],
2545 )
2547 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2548 #--------------------------
2549 selfA._setInternalState("Xn", Xn)
2550 selfA._setInternalState("seed", numpy.random.get_state())
2551 #--------------------------
2553 if selfA._parameters["StoreInternalVariables"] \
2554 or selfA._toStore("CostFunctionJ") \
2555 or selfA._toStore("CostFunctionJb") \
2556 or selfA._toStore("CostFunctionJo") \
2557 or selfA._toStore("APosterioriCovariance") \
2558 or selfA._toStore("InnovationAtCurrentAnalysis") \
2559 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2560 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2561 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2562 _Innovation = Ynpu - _HXa
2564 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2565 # ---> with analysis
2566 selfA.StoredVariables["Analysis"].store( Xa )
2567 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2568 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2569 if selfA._toStore("InnovationAtCurrentAnalysis"):
2570 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2571 # ---> with current state
2572 if selfA._parameters["StoreInternalVariables"] \
2573 or selfA._toStore("CurrentState"):
2574 selfA.StoredVariables["CurrentState"].store( Xn )
2575 if selfA._toStore("ForecastState"):
2576 selfA.StoredVariables["ForecastState"].store( EMX )
2577 if selfA._toStore("ForecastCovariance"):
2578 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2579 if selfA._toStore("BMA"):
2580 selfA.StoredVariables["BMA"].store( EMX - Xa )
2581 if selfA._toStore("InnovationAtCurrentState"):
2582 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2583 if selfA._toStore("SimulatedObservationAtCurrentState") \
2584 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2585 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2587 if selfA._parameters["StoreInternalVariables"] \
2588 or selfA._toStore("CostFunctionJ") \
2589 or selfA._toStore("CostFunctionJb") \
2590 or selfA._toStore("CostFunctionJo") \
2591 or selfA._toStore("CurrentOptimum") \
2592 or selfA._toStore("APosterioriCovariance"):
2593 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2594 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2595 J = Jb + Jo
2596 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2597 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2598 selfA.StoredVariables["CostFunctionJ" ].store( J )
2600 if selfA._toStore("IndexOfOptimum") \
2601 or selfA._toStore("CurrentOptimum") \
2602 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2603 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2604 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2605 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2606 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2607 if selfA._toStore("IndexOfOptimum"):
2608 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2609 if selfA._toStore("CurrentOptimum"):
2610 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2611 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2612 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2613 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2614 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2615 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2616 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2617 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2618 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2619 if selfA._toStore("APosterioriCovariance"):
2620 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2621 if selfA._parameters["EstimationOf"] == "Parameters" \
2622 and J < previousJMinimum:
2623 previousJMinimum = J
2624 XaMin = Xa
2625 if selfA._toStore("APosterioriCovariance"):
2626 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2627 # ---> For the smoothers
2628 if selfA._toStore("CurrentEnsembleState"):
2629 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2631 # Additional final storage of the optimum in parameter estimation
2632 # ----------------------------------------------------------------------
2633 if selfA._parameters["EstimationOf"] == "Parameters":
2634 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2635 selfA.StoredVariables["Analysis"].store( XaMin )
2636 if selfA._toStore("APosterioriCovariance"):
2637 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2638 if selfA._toStore("BMA"):
2639 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2643 # ==============================================================================
2655 Computer implementation of the MMQR algorithm, based on the publication:
2656 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2657 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2660 # Retrieval of the initial data and information
2661 # ---------------------------------------------
2662 variables = numpy.ravel( x0 )
2663 mesures = numpy.ravel( y )
2664 increment = sys.float_info[0]
2665 p = variables.size
2666 n = mesures.size
2667 quantile = float(quantile)
2669 # Computation of the MM parameters
2670 # --------------------------------
2671 tn = float(toler) / n
2672 e0 = -tn / math.log(tn)
2673 epsilon = (e0-tn)/(1+math.log(e0))
2675 # Initialization computations
2676 # ---------------------------
2677 residus = mesures - numpy.ravel( func( variables ) )
2678 poids = 1./(epsilon+numpy.abs(residus))
2679 veps = 1. - 2. * quantile - residus * poids
2680 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2681 iteration = 0
2683 # Iterative search
2684 # ----------------
2685 while (increment > toler) and (iteration < maxfun) :
2686 iteration += 1
2688 Derivees = numpy.array(fprime(variables))
2689 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it passes through YACS pipes
2690 DeriveesT = Derivees.transpose()
2691 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2692 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2693 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2695 variables = variables + step
2696 if bounds is not None:
2697 # Warning: avoid an infinite loop if an interval is too small
2698 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2699 step = step/2.
2700 variables = variables - step
2701 residus = mesures - numpy.ravel( func(variables) )
2702 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2704 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2705 step = step/2.
2706 variables = variables - step
2707 residus = mesures - numpy.ravel( func(variables) )
2708 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2710 increment = lastsurrogate-surrogate
2711 poids = 1./(epsilon+numpy.abs(residus))
2712 veps = 1. - 2. * quantile - residus * poids
2713 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2717 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2719 return variables, Ecart, [n,p,iteration,increment,0]
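#
# Illustrative sketch, not part of the ADAO API: the same MM reweighting as mmqr
# above, specialized to a linear model y ~ A @ beta so that each MM step reduces
# to one weighted least-squares solve; the step-halving safeguard of mmqr is
# omitted here. Data and names are hypothetical.
def _mmqr_linear_sketch( A, y, quantile=0.5, iterations=50, epsilon=1.e-6 ):
    "Return the quantile-regression coefficients of the linear model A @ beta."
    beta = numpy.linalg.lstsq(A, y, rcond=None)[0]           # least-squares start
    for _ in range(iterations):
        residus = y - A @ beta
        poids   = 1./(epsilon + numpy.abs(residus))          # MM majorizer weights
        veps    = 1. - 2.*quantile - residus * poids
        M  = A.T @ (poids[:,None] * A)                       # same normal matrix as in mmqr
        SM = A.T @ veps
        beta = beta - numpy.linalg.lstsq(M, SM, rcond=None)[0]
    return beta
# For instance, with one strong outlier in y, the median fit (quantile = 0.5)
# stays close to the bulk of the points where a least-squares fit would not.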
2721 # ==============================================================================
2722 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2723 """
2724 Multi-step and multi-method 3DVAR
2725 """
2729 if selfA._parameters["EstimationOf"] == "State":
2730 M = EM["Direct"].appliedTo
2732 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2733 Xn = numpy.ravel(Xb).reshape((-1,1))
2734 selfA.StoredVariables["Analysis"].store( Xn )
2735 if selfA._toStore("APosterioriCovariance"):
2736 if hasattr(B,"asfullmatrix"):
2737 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2738 else:
2739 selfA.StoredVariables["APosterioriCovariance"].store( B )
2740 if selfA._toStore("ForecastState"):
2741 selfA.StoredVariables["ForecastState"].store( Xn )
2742 elif selfA._parameters["nextStep"]:
2743 Xn = selfA._getInternalState("Xn")
2744 else:
2745 Xn = numpy.ravel(Xb).reshape((-1,1))
2747 if hasattr(Y,"stepnumber"):
2748 duration = Y.stepnumber()
2749 else:
2750 duration = 2
2753 for step in range(duration-1):
2754 if hasattr(Y,"store"):
2755 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2756 else:
2757 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2759 if selfA._parameters["EstimationOf"] == "State": # Forecast
2760 Xn_predicted = M( Xn )
2761 if selfA._toStore("ForecastState"):
2762 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2763 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2764 # --- > By principle, M = Id, Q = 0
2765 Xn_predicted = Xn
2766 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2768 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2770 Xn = selfA.StoredVariables["Analysis"][-1]
2771 #--------------------------
2772 selfA._setInternalState("Xn", Xn)
2776 # ==============================================================================
2777 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2778 """
2779 3DVAR PSAS
2780 """
2784 Hm = HO["Direct"].appliedTo
2786 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2787 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2790 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2791 if Y.size != HXb.size:
2792 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2793 if max(Y.shape) != max(HXb.shape):
2794 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2796 if selfA._toStore("JacobianMatrixAtBackground"):
2797 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2798 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2799 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2801 Ht = HO["Tangent"].asMatrix(Xb)
2803 HBHTpR = R + Ht * BHT
2804 Innovation = Y - HXb
2806 Xini = numpy.zeros(Y.size) # the PSAS weight variable w lives in observation space
2808 # Definition of the cost function
2809 # -------------------------------
2810 def CostFunction(w):
2811 _W = w.reshape((-1,1))
2812 if selfA._parameters["StoreInternalVariables"] or \
2813 selfA._toStore("CurrentState") or \
2814 selfA._toStore("CurrentOptimum"):
2815 selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2816 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2817 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2818 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2819 if selfA._toStore("InnovationAtCurrentState"):
2820 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2822 Jb = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2823 Jo = float( - _W.T @ Innovation )
2824 J = Jb + Jo
2826 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2827 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2828 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2829 selfA.StoredVariables["CostFunctionJ" ].store( J )
2830 if selfA._toStore("IndexOfOptimum") or \
2831 selfA._toStore("CurrentOptimum") or \
2832 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2833 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2834 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2835 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2836 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2837 if selfA._toStore("IndexOfOptimum"):
2838 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2839 if selfA._toStore("CurrentOptimum"):
2840 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2841 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2842 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2843 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2844 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2845 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2846 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2847 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2848 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2851 def GradientOfCostFunction(w):
2852 _W = w.reshape((-1,1))
2853 GradJb = HBHTpR @ _W
2854 GradJo = - Innovation
2855 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2856 return GradJ
2858 # Minimization of the functional
2859 # ------------------------------
2860 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2862 if selfA._parameters["Minimizer"] == "LBFGSB":
2863 if "0.19" <= scipy.version.version <= "1.1.0":
2864 import lbfgsbhlt as optimiseur
2866 import scipy.optimize as optimiseur
2867 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2868 func = CostFunction,
2869 x0 = Xini,
2870 fprime = GradientOfCostFunction,
2871 args = (),
2872 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2873 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2874 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2875 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2876 iprint = selfA._parameters["optiprint"],
2877 )
2878 nfeval = Informations['funcalls']
2879 rc = Informations['warnflag']
2880 elif selfA._parameters["Minimizer"] == "TNC":
2881 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2882 func = CostFunction,
2883 x0 = Xini,
2884 fprime = GradientOfCostFunction,
2885 args = (),
2886 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2887 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2888 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2889 ftol = selfA._parameters["CostDecrementTolerance"],
2890 messages = selfA._parameters["optmessages"],
2891 )
2892 elif selfA._parameters["Minimizer"] == "CG":
2893 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2896 fprime = GradientOfCostFunction,
2898 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2899 gtol = selfA._parameters["GradientNormTolerance"],
2900 disp = selfA._parameters["optdisp"],
2903 elif selfA._parameters["Minimizer"] == "NCG":
2904 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2907 fprime = GradientOfCostFunction,
2909 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2910 avextol = selfA._parameters["CostDecrementTolerance"],
2911 disp = selfA._parameters["optdisp"],
2914 elif selfA._parameters["Minimizer"] == "BFGS":
2915 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2918 fprime = GradientOfCostFunction,
2920 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2921 gtol = selfA._parameters["GradientNormTolerance"],
2922 disp = selfA._parameters["optdisp"],
2926 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2928 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2929 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2931 # Correction to work around a TNC bug on the returned Minimum
2932 # ----------------------------------------------------------------
2933 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2934 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2935 else:
2936 Minimum = Xb + BHT @ Minimum.reshape((-1,1))
2937 #
2938 Xa = Minimum
2939 #--------------------------
2941 selfA.StoredVariables["Analysis"].store( Xa )
2943 if selfA._toStore("OMA") or \
2944 selfA._toStore("SigmaObs2") or \
2945 selfA._toStore("SimulationQuantiles") or \
2946 selfA._toStore("SimulatedObservationAtOptimum"):
2947 if selfA._toStore("SimulatedObservationAtCurrentState"):
2948 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2949 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2950 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2951 else:
2952 HXa = Hm( Xa )
2954 if selfA._toStore("APosterioriCovariance") or \
2955 selfA._toStore("SimulationQuantiles") or \
2956 selfA._toStore("JacobianMatrixAtOptimum") or \
2957 selfA._toStore("KalmanGainAtOptimum"):
2958 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2959 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2960 if selfA._toStore("APosterioriCovariance") or \
2961 selfA._toStore("SimulationQuantiles") or \
2962 selfA._toStore("KalmanGainAtOptimum"):
2963 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2964 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2965 if selfA._toStore("APosterioriCovariance") or \
2966 selfA._toStore("SimulationQuantiles"):
2967 BI = B.getI()
2968 RI = R.getI()
2969 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2970 if selfA._toStore("APosterioriCovariance"):
2971 selfA.StoredVariables["APosterioriCovariance"].store( A )
2972 if selfA._toStore("JacobianMatrixAtOptimum"):
2973 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2974 if selfA._toStore("KalmanGainAtOptimum"):
2975 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2976 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2977 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2979 # Additional computations and/or storage
2980 # ---------------------------------------
2981 if selfA._toStore("Innovation") or \
2982 selfA._toStore("SigmaObs2") or \
2983 selfA._toStore("MahalanobisConsistency") or \
2984 selfA._toStore("OMB"):
2985 d = Y - HXb
2986 if selfA._toStore("Innovation"):
2987 selfA.StoredVariables["Innovation"].store( d )
2988 if selfA._toStore("BMA"):
2989 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2990 if selfA._toStore("OMA"):
2991 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2992 if selfA._toStore("OMB"):
2993 selfA.StoredVariables["OMB"].store( d )
2994 if selfA._toStore("SigmaObs2"):
2995 TraceR = R.trace(Y.size)
2996 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2997 if selfA._toStore("MahalanobisConsistency"):
2998 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2999 if selfA._toStore("SimulationQuantiles"):
3000 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3001 if selfA._toStore("SimulatedObservationAtBackground"):
3002 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3003 if selfA._toStore("SimulatedObservationAtOptimum"):
3004 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3006 return 0
3008 # ==============================================================================
3009 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
3010 """
3011 Stochastic EnKF
3012 """
3013 if selfA._parameters["EstimationOf"] == "Parameters":
3014 selfA._parameters["StoreInternalVariables"] = True
3017 H = HO["Direct"].appliedControledFormTo
3019 if selfA._parameters["EstimationOf"] == "State":
3020 M = EM["Direct"].appliedControledFormTo
3022 if CM is not None and "Tangent" in CM and U is not None:
3023 Cm = CM["Tangent"].asMatrix(Xb)
3024 else:
3025 Cm = None
3027 # Observation duration and sizes
3028 if hasattr(Y,"stepnumber"):
3029 duration = Y.stepnumber()
3030 __p = numpy.cumprod(Y.shape())[-1]
3031 else:
3032 duration = 2
3033 __p = numpy.array(Y).size
3035 # Precompute the inversions of B and R
3036 if selfA._parameters["StoreInternalVariables"] \
3037 or selfA._toStore("CostFunctionJ") \
3038 or selfA._toStore("CostFunctionJb") \
3039 or selfA._toStore("CostFunctionJo") \
3040 or selfA._toStore("CurrentOptimum") \
3041 or selfA._toStore("APosterioriCovariance"):
3042 BI = B.getI()
3043 RI = R.getI()
3044 #
3045 __n = Xb.size
3046 __m = selfA._parameters["NumberOfMembers"]
3048 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3049 else:
3050 Rn = R
3051 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3052 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3053 else: Pn = B
3054 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3055 selfA.StoredVariables["Analysis"].store( Xb )
3056 if selfA._toStore("APosterioriCovariance"):
3057 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3058 selfA._setInternalState("seed", numpy.random.get_state())
3059 elif selfA._parameters["nextStep"]:
3060 Xn = selfA._getInternalState("Xn")
3062 previousJMinimum = numpy.finfo(float).max
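# Time loop: each iteration is one EnKF cycle, i.e. an ensemble forecast
# (with model error Q), followed by an analysis update of every member.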
3064 for step in range(duration-1):
3065 numpy.random.set_state(selfA._getInternalState("seed"))
3066 if hasattr(Y,"store"):
3067 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3068 else:
3069 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3070 #
3071 if U is not None:
3072 if hasattr(U,"store") and len(U)>1:
3073 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3074 elif hasattr(U,"store") and len(U)==1:
3075 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3076 else:
3077 Un = numpy.asmatrix(numpy.ravel( U )).T
3078 else:
3079 Un = None
3081 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3082 Xn = CovarianceInflation( Xn,
3083 selfA._parameters["InflationType"],
3084 selfA._parameters["InflationFactor"],
3085 )
3087 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3088 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3089 argsAsSerie = True,
3090 returnSerieAsArrayMatrix = True )
3091 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3092 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3093 argsAsSerie = True,
3094 returnSerieAsArrayMatrix = True )
3095 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3096 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3097 Xn_predicted = Xn_predicted + Cm * Un
3098 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3099 # --- > By principle, M = Id, Q = 0
3100 Xn_predicted = EMX = Xn
3101 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3102 argsAsSerie = True,
3103 returnSerieAsArrayMatrix = True )
3105 # Mean of forecast and observation of forecast
3106 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3107 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3109 #--------------------------
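# Variant "KalmanFilterFormula05": stochastic EnKF with explicitly perturbed
# observations, in which Pf H' and H Pf H' are estimated from the ensemble
# anomalies and each member is updated with its own perturbed observation.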
3110 if VariantM == "KalmanFilterFormula05":
3111 PfHT, HPfHT = 0., 0.
3112 for i in range(__m):
3113 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3114 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3115 PfHT += Exfi * Eyfi.T
3116 HPfHT += Eyfi * Eyfi.T
3117 PfHT = (1./(__m-1)) * PfHT
3118 HPfHT = (1./(__m-1)) * HPfHT
3119 Kn = PfHT * ( R + HPfHT ).I
3122 for i in range(__m):
3123 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3124 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3125 #--------------------------
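# Variant "KalmanFilterFormula16": algebraically equivalent anomaly-based
# form, in which the gain K = EaX EaY' (EaY EaY')^-1 is built directly from
# the normalized state and observation anomaly matrices.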
3126 elif VariantM == "KalmanFilterFormula16":
3127 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3128 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3130 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3131 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3133 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3135 for i in range(__m):
3136 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3137 #--------------------------
3138 else:
3139 raise ValueError("VariantM has to be chosen in the authorized methods list.")
3141 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3142 Xn = CovarianceInflation( Xn,
3143 selfA._parameters["InflationType"],
3144 selfA._parameters["InflationFactor"],
3145 )
3147 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3148 #--------------------------
3149 selfA._setInternalState("Xn", Xn)
3150 selfA._setInternalState("seed", numpy.random.get_state())
3151 #--------------------------
3153 if selfA._parameters["StoreInternalVariables"] \
3154 or selfA._toStore("CostFunctionJ") \
3155 or selfA._toStore("CostFunctionJb") \
3156 or selfA._toStore("CostFunctionJo") \
3157 or selfA._toStore("APosterioriCovariance") \
3158 or selfA._toStore("InnovationAtCurrentAnalysis") \
3159 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3160 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3161 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3162 _Innovation = Ynpu - _HXa
3164 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3165 # ---> with analysis
3166 selfA.StoredVariables["Analysis"].store( Xa )
3167 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3168 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3169 if selfA._toStore("InnovationAtCurrentAnalysis"):
3170 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3171 # ---> with current state
3172 if selfA._parameters["StoreInternalVariables"] \
3173 or selfA._toStore("CurrentState"):
3174 selfA.StoredVariables["CurrentState"].store( Xn )
3175 if selfA._toStore("ForecastState"):
3176 selfA.StoredVariables["ForecastState"].store( EMX )
3177 if selfA._toStore("ForecastCovariance"):
3178 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3179 if selfA._toStore("BMA"):
3180 selfA.StoredVariables["BMA"].store( EMX - Xa )
3181 if selfA._toStore("InnovationAtCurrentState"):
3182 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3183 if selfA._toStore("SimulatedObservationAtCurrentState") \
3184 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3185 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3187 if selfA._parameters["StoreInternalVariables"] \
3188 or selfA._toStore("CostFunctionJ") \
3189 or selfA._toStore("CostFunctionJb") \
3190 or selfA._toStore("CostFunctionJo") \
3191 or selfA._toStore("CurrentOptimum") \
3192 or selfA._toStore("APosterioriCovariance"):
3193 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3194 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3195 J = Jb + Jo
3196 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3197 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3198 selfA.StoredVariables["CostFunctionJ" ].store( J )
3200 if selfA._toStore("IndexOfOptimum") \
3201 or selfA._toStore("CurrentOptimum") \
3202 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3203 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3204 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3205 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3206 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3207 if selfA._toStore("IndexOfOptimum"):
3208 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3209 if selfA._toStore("CurrentOptimum"):
3210 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3211 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3212 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3213 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3214 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3215 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3216 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3217 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3218 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3219 if selfA._toStore("APosterioriCovariance"):
3220 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3221 if selfA._parameters["EstimationOf"] == "Parameters" \
3222 and J < previousJMinimum:
3223 previousJMinimum = J
3224 XaMin = Xa
3225 if selfA._toStore("APosterioriCovariance"):
3226 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3227 # ---> For the smoothers
3228 if selfA._toStore("CurrentEnsembleState"):
3229 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3231 # Final additional storage of the optimum for parameter estimation
3232 # ----------------------------------------------------------------------
3233 if selfA._parameters["EstimationOf"] == "Parameters":
3234 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3235 selfA.StoredVariables["Analysis"].store( XaMin )
3236 if selfA._toStore("APosterioriCovariance"):
3237 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3238 if selfA._toStore("BMA"):
3239 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3241 return 0
3243 # ==============================================================================
3244 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3245 """
3246 3DVAR
3247 """
3248 #
3249 # Initializations
3250 # ---------------
3251 Hm = HO["Direct"].appliedTo
3252 Ha = HO["Adjoint"].appliedInXTo
3254 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3255 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3256 else:
3257 HXb = Hm( Xb )
3258 HXb = HXb.reshape((-1,1))
3259 if Y.size != HXb.size:
3260 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3261 if max(Y.shape) != max(HXb.shape):
3262 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3264 if selfA._toStore("JacobianMatrixAtBackground"):
3265 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3266 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3267 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3268 # Precompute the inversions of B and R
3269 BI = B.getI()
3270 RI = R.getI()
3271 # Starting point of the optimization
3272 Xini = selfA._parameters["InitializationPoint"]
3274 # Definition of the cost function
3275 # ------------------------------
3276 def CostFunction(x):
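# Classical primal 3DVAR cost function:
# J(x) = 0.5*(x-Xb)'B^-1(x-Xb) + 0.5*(Y-H(x))'R^-1(Y-H(x))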
3277 _X = numpy.ravel( x ).reshape((-1,1))
3278 if selfA._parameters["StoreInternalVariables"] or \
3279 selfA._toStore("CurrentState") or \
3280 selfA._toStore("CurrentOptimum"):
3281 selfA.StoredVariables["CurrentState"].store( _X )
3282 _HX = Hm( _X ).reshape((-1,1))
3283 _Innovation = Y - _HX
3284 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3285 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3286 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3287 if selfA._toStore("InnovationAtCurrentState"):
3288 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3290 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3291 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3292 J = Jb + Jo
3294 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3295 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3296 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3297 selfA.StoredVariables["CostFunctionJ" ].store( J )
3298 if selfA._toStore("IndexOfOptimum") or \
3299 selfA._toStore("CurrentOptimum") or \
3300 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3301 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3302 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3303 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3304 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3305 if selfA._toStore("IndexOfOptimum"):
3306 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3307 if selfA._toStore("CurrentOptimum"):
3308 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3309 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3310 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3311 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3312 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3313 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3314 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3315 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3316 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3317 return J
3319 def GradientOfCostFunction(x):
3320 _X = x.reshape((-1,1))
3321 _HX = Hm( _X ).reshape((-1,1))
3322 GradJb = BI * (_X - Xb)
3323 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3324 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3325 return GradJ
3327 # Minimization of the cost functional
3328 # --------------------------------
3329 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3331 if selfA._parameters["Minimizer"] == "LBFGSB":
3332 if "0.19" <= scipy.version.version <= "1.1.0":
3333 import lbfgsbhlt as optimiseur
3334 else:
3335 import scipy.optimize as optimiseur
3336 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3337 func = CostFunction,
3338 x0 = Xini,
3339 fprime = GradientOfCostFunction,
3340 args = (),
3341 bounds = selfA._parameters["Bounds"],
3342 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3343 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3344 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3345 iprint = selfA._parameters["optiprint"],
3346 )
3347 nfeval = Informations['funcalls']
3348 rc = Informations['warnflag']
3349 elif selfA._parameters["Minimizer"] == "TNC":
3350 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3351 func = CostFunction,
3352 x0 = Xini,
3353 fprime = GradientOfCostFunction,
3354 args = (),
3355 bounds = selfA._parameters["Bounds"],
3356 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3357 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3358 ftol = selfA._parameters["CostDecrementTolerance"],
3359 messages = selfA._parameters["optmessages"],
3360 )
3361 elif selfA._parameters["Minimizer"] == "CG":
3362 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3363 f = CostFunction,
3364 x0 = Xini,
3365 fprime = GradientOfCostFunction,
3366 args = (),
3367 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3368 gtol = selfA._parameters["GradientNormTolerance"],
3369 disp = selfA._parameters["optdisp"],
3370 full_output = True,
3371 )
3372 elif selfA._parameters["Minimizer"] == "NCG":
3373 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3374 f = CostFunction,
3375 x0 = Xini,
3376 fprime = GradientOfCostFunction,
3377 args = (),
3378 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3379 avextol = selfA._parameters["CostDecrementTolerance"],
3380 disp = selfA._parameters["optdisp"],
3381 full_output = True,
3382 )
3383 elif selfA._parameters["Minimizer"] == "BFGS":
3384 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3385 f = CostFunction,
3386 x0 = Xini,
3387 fprime = GradientOfCostFunction,
3388 args = (),
3389 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3390 gtol = selfA._parameters["GradientNormTolerance"],
3391 disp = selfA._parameters["optdisp"],
3392 full_output = True,
3393 )
3394 else:
3395 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3397 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3398 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3400 # Correction to work around a TNC bug on the returned Minimum
3401 # ----------------------------------------------------------------
3402 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3403 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3404 #
3405 Xa = Minimum
3406 #--------------------------
3408 selfA.StoredVariables["Analysis"].store( Xa )
3410 if selfA._toStore("OMA") or \
3411 selfA._toStore("SigmaObs2") or \
3412 selfA._toStore("SimulationQuantiles") or \
3413 selfA._toStore("SimulatedObservationAtOptimum"):
3414 if selfA._toStore("SimulatedObservationAtCurrentState"):
3415 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3416 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3417 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3418 else:
3419 HXa = Hm( Xa )
3421 if selfA._toStore("APosterioriCovariance") or \
3422 selfA._toStore("SimulationQuantiles") or \
3423 selfA._toStore("JacobianMatrixAtOptimum") or \
3424 selfA._toStore("KalmanGainAtOptimum"):
3425 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3426 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3427 if selfA._toStore("APosterioriCovariance") or \
3428 selfA._toStore("SimulationQuantiles") or \
3429 selfA._toStore("KalmanGainAtOptimum"):
3430 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3431 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3432 if selfA._toStore("APosterioriCovariance") or \
3433 selfA._toStore("SimulationQuantiles"):
3434 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3435 if selfA._toStore("APosterioriCovariance"):
3436 selfA.StoredVariables["APosterioriCovariance"].store( A )
3437 if selfA._toStore("JacobianMatrixAtOptimum"):
3438 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3439 if selfA._toStore("KalmanGainAtOptimum"):
3440 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3441 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3442 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3444 # Additional computations and/or storage
3445 # ---------------------------------------
3446 if selfA._toStore("Innovation") or \
3447 selfA._toStore("SigmaObs2") or \
3448 selfA._toStore("MahalanobisConsistency") or \
3449 selfA._toStore("OMB"):
3450 d = Y - HXb
3451 if selfA._toStore("Innovation"):
3452 selfA.StoredVariables["Innovation"].store( d )
3453 if selfA._toStore("BMA"):
3454 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3455 if selfA._toStore("OMA"):
3456 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3457 if selfA._toStore("OMB"):
3458 selfA.StoredVariables["OMB"].store( d )
3459 if selfA._toStore("SigmaObs2"):
3460 TraceR = R.trace(Y.size)
3461 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3462 if selfA._toStore("MahalanobisConsistency"):
3463 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3464 if selfA._toStore("SimulationQuantiles"):
3465 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3466 if selfA._toStore("SimulatedObservationAtBackground"):
3467 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3468 if selfA._toStore("SimulatedObservationAtOptimum"):
3469 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3471 return 0
3473 # ==============================================================================
3474 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3475 """
3476 4DVAR
3477 """
3478 #
3479 # Initializations
3480 # ---------------
3481 #
3482 # Operators
3483 Hm = HO["Direct"].appliedControledFormTo
3484 Mm = EM["Direct"].appliedControledFormTo
3486 if CM is not None and "Tangent" in CM and U is not None:
3487 Cm = CM["Tangent"].asMatrix(Xb)
3488 else:
3489 Cm = None
3490 #
3491 def Un(_step):
3492 if U is not None:
3493 if hasattr(U,"store") and 1<=_step<len(U) :
3494 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3495 elif hasattr(U,"store") and len(U)==1:
3496 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3497 else:
3498 _Un = numpy.asmatrix(numpy.ravel( U )).T
3499 else:
3500 _Un = None
3501 return _Un
3502 def CmUn(_xn,_un):
3503 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
3504 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3505 _CmUn = _Cm * _un
3506 else:
3507 _CmUn = 0.
3508 return _CmUn
3510 # Remark: the observations are used starting from time step number 1,
3511 # and are used in Yo as ordered along these indices. Step 0 is therefore
3512 # not used, since the first assimilation step starts with the observation
3513 # of step 1.
3515 # Number of steps identical to the number of observation steps
3516 if hasattr(Y,"stepnumber"):
3517 duration = Y.stepnumber()
3518 else:
3519 duration = 2
3521 # Precompute the inversions of B and R
3522 BI = B.getI()
3523 RI = R.getI()
3524 #
3525 # Starting point of the optimization
3526 Xini = selfA._parameters["InitializationPoint"]
3528 # Définition de la fonction-coût
3529 # ------------------------------
3530 selfA.DirectCalculation = [None,] # Step 0 is not observed
3531 selfA.DirectInnovation = [None,] # Step 0 is not observed
3532 def CostFunction(x):
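# 4DVAR cost function over the assimilation window: a background misfit at
# the initial time plus the accumulated observation misfits computed while
# integrating the model trajectory forward.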
3533 _X = numpy.asmatrix(numpy.ravel( x )).T
3534 if selfA._parameters["StoreInternalVariables"] or \
3535 selfA._toStore("CurrentState") or \
3536 selfA._toStore("CurrentOptimum"):
3537 selfA.StoredVariables["CurrentState"].store( _X )
3538 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3539 selfA.DirectCalculation = [None,]
3540 selfA.DirectInnovation = [None,]
3541 Jo = 0.
3542 _Xn = _X
3543 for step in range(0,duration-1):
3544 if hasattr(Y,"store"):
3545 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3546 else:
3547 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3549 _Un = Un(step)
3551 if selfA._parameters["EstimationOf"] == "State":
3552 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3553 elif selfA._parameters["EstimationOf"] == "Parameters":
3554 # --- > By principle, M = Id, there is no evolution
3555 pass
3556 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3557 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3559 # Difference-to-observations step
3560 if selfA._parameters["EstimationOf"] == "State":
3561 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3562 elif selfA._parameters["EstimationOf"] == "Parameters":
3563 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3565 # State storage
3566 selfA.DirectCalculation.append( _Xn )
3567 selfA.DirectInnovation.append( _YmHMX )
3569 # Accumulation into the observation part of the cost function
3570 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3572 J = Jb + Jo
3573 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3574 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3575 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3576 selfA.StoredVariables["CostFunctionJ" ].store( J )
3577 if selfA._toStore("IndexOfOptimum") or \
3578 selfA._toStore("CurrentOptimum") or \
3579 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3580 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3581 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3582 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3583 if selfA._toStore("IndexOfOptimum"):
3584 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3585 if selfA._toStore("CurrentOptimum"):
3586 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3587 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3588 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3589 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3590 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3591 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3592 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3593 return J
3595 def GradientOfCostFunction(x):
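# The gradient is computed by integrating the adjoint model backward in time,
# re-using the states and innovations stored during the direct calculation.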
3596 _X = numpy.asmatrix(numpy.ravel( x )).T
3597 GradJb = BI * (_X - Xb)
3598 GradJo = 0.
3599 for step in range(duration-1,0,-1):
3600 # Retrieve the last stored state of the evolution
3601 _Xn = selfA.DirectCalculation.pop()
3602 # Retrieve the last stored innovation
3603 _YmHMX = selfA.DirectInnovation.pop()
3604 # Computation of the adjoint operators
3605 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3606 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3607 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3608 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3609 # Gradient computation by adjoint state
3610 GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3611 GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3612 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3613 return GradJ
3615 # Minimization of the cost functional
3616 # --------------------------------
3617 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3619 if selfA._parameters["Minimizer"] == "LBFGSB":
3620 if "0.19" <= scipy.version.version <= "1.1.0":
3621 import lbfgsbhlt as optimiseur
3622 else:
3623 import scipy.optimize as optimiseur
3624 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3625 func = CostFunction,
3626 x0 = Xini,
3627 fprime = GradientOfCostFunction,
3628 args = (),
3629 bounds = selfA._parameters["Bounds"],
3630 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3631 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3632 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3633 iprint = selfA._parameters["optiprint"],
3634 )
3635 nfeval = Informations['funcalls']
3636 rc = Informations['warnflag']
3637 elif selfA._parameters["Minimizer"] == "TNC":
3638 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3639 func = CostFunction,
3640 x0 = Xini,
3641 fprime = GradientOfCostFunction,
3642 args = (),
3643 bounds = selfA._parameters["Bounds"],
3644 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3645 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3646 ftol = selfA._parameters["CostDecrementTolerance"],
3647 messages = selfA._parameters["optmessages"],
3648 )
3649 elif selfA._parameters["Minimizer"] == "CG":
3650 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3651 f = CostFunction,
3652 x0 = Xini,
3653 fprime = GradientOfCostFunction,
3654 args = (),
3655 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3656 gtol = selfA._parameters["GradientNormTolerance"],
3657 disp = selfA._parameters["optdisp"],
3658 full_output = True,
3659 )
3660 elif selfA._parameters["Minimizer"] == "NCG":
3661 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3662 f = CostFunction,
3663 x0 = Xini,
3664 fprime = GradientOfCostFunction,
3665 args = (),
3666 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3667 avextol = selfA._parameters["CostDecrementTolerance"],
3668 disp = selfA._parameters["optdisp"],
3669 full_output = True,
3670 )
3671 elif selfA._parameters["Minimizer"] == "BFGS":
3672 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3673 f = CostFunction,
3674 x0 = Xini,
3675 fprime = GradientOfCostFunction,
3676 args = (),
3677 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3678 gtol = selfA._parameters["GradientNormTolerance"],
3679 disp = selfA._parameters["optdisp"],
3680 full_output = True,
3681 )
3682 else:
3683 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3685 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3686 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3688 # Correction to work around a TNC bug on the returned Minimum
3689 # ----------------------------------------------------------------
3690 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3691 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3693 # Obtaining the analysis
3694 # ----------------------
3695 Xa = Minimum
3697 selfA.StoredVariables["Analysis"].store( Xa )
3699 # Additional computations and/or storage
3700 # ---------------------------------------
3701 if selfA._toStore("BMA"):
3702 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3704 return 0
3706 # ==============================================================================
3707 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3708 """
3709 Standard Kalman Filter
3710 """
3711 if selfA._parameters["EstimationOf"] == "Parameters":
3712 selfA._parameters["StoreInternalVariables"] = True
3716 Ht = HO["Tangent"].asMatrix(Xb)
3717 Ha = HO["Adjoint"].asMatrix(Xb)
3719 if selfA._parameters["EstimationOf"] == "State":
3720 Mt = EM["Tangent"].asMatrix(Xb)
3721 Ma = EM["Adjoint"].asMatrix(Xb)
3723 if CM is not None and "Tangent" in CM and U is not None:
3724 Cm = CM["Tangent"].asMatrix(Xb)
3725 else:
3726 Cm = None
3728 # Observation duration and sizes
3729 if hasattr(Y,"stepnumber"):
3730 duration = Y.stepnumber()
3731 __p = numpy.cumprod(Y.shape())[-1]
3732 else:
3733 duration = 2
3734 __p = numpy.array(Y).size
3736 # Precompute the inversions of B and R
3737 if selfA._parameters["StoreInternalVariables"] \
3738 or selfA._toStore("CostFunctionJ") \
3739 or selfA._toStore("CostFunctionJb") \
3740 or selfA._toStore("CostFunctionJo") \
3741 or selfA._toStore("CurrentOptimum") \
3742 or selfA._toStore("APosterioriCovariance"):
3743 BI = B.getI()
3744 RI = R.getI()
3745 #
3746 __n = Xb.size
3748 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3749 Xn = Xb
3750 Pn = B
3751 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3752 selfA.StoredVariables["Analysis"].store( Xb )
3753 if selfA._toStore("APosterioriCovariance"):
3754 if hasattr(B,"asfullmatrix"):
3755 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3757 selfA.StoredVariables["APosterioriCovariance"].store( B )
3758 selfA._setInternalState("seed", numpy.random.get_state())
3759 elif selfA._parameters["nextStep"]:
3760 Xn = selfA._getInternalState("Xn")
3761 Pn = selfA._getInternalState("Pn")
3763 if selfA._parameters["EstimationOf"] == "Parameters":
3764 XaMin = Xn
3765 previousJMinimum = numpy.finfo(float).max
3767 for step in range(duration-1):
3768 if hasattr(Y,"store"):
3769 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3770 else:
3771 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3772 #
3773 if U is not None:
3774 if hasattr(U,"store") and len(U)>1:
3775 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3776 elif hasattr(U,"store") and len(U)==1:
3777 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3778 else:
3779 Un = numpy.asmatrix(numpy.ravel( U )).T
3780 else:
3781 Un = None
3783 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3784 Xn_predicted = Mt * Xn
3785 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3786 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3787 Xn_predicted = Xn_predicted + Cm * Un
3788 Pn_predicted = Q + Mt * (Pn * Ma)
3789 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3790 # --- > By principle, M = Id, Q = 0
3791 Xn_predicted = Xn
3792 Pn_predicted = Pn
3794 if selfA._parameters["EstimationOf"] == "State":
3795 HX_predicted = Ht * Xn_predicted
3796 _Innovation = Ynpu - HX_predicted
3797 elif selfA._parameters["EstimationOf"] == "Parameters":
3798 HX_predicted = Ht * Xn_predicted
3799 _Innovation = Ynpu - HX_predicted
3800 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
3801 _Innovation = _Innovation - Cm * Un
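# Analysis step of the linear Kalman filter:
# K = Pf H' (H Pf H' + R)^-1, Xa = Xf + K d, Pa = (I - K H) Pf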
3803 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3804 Xn = Xn_predicted + Kn * _Innovation
3805 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3806 Xa = Xn
3808 #--------------------------
3809 selfA._setInternalState("Xn", Xn)
3810 selfA._setInternalState("Pn", Pn)
3811 #--------------------------
3813 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3814 # ---> with analysis
3815 selfA.StoredVariables["Analysis"].store( Xa )
3816 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3817 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3818 if selfA._toStore("InnovationAtCurrentAnalysis"):
3819 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3820 # ---> with current state
3821 if selfA._parameters["StoreInternalVariables"] \
3822 or selfA._toStore("CurrentState"):
3823 selfA.StoredVariables["CurrentState"].store( Xn )
3824 if selfA._toStore("ForecastState"):
3825 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3826 if selfA._toStore("ForecastCovariance"):
3827 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3828 if selfA._toStore("BMA"):
3829 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3830 if selfA._toStore("InnovationAtCurrentState"):
3831 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3832 if selfA._toStore("SimulatedObservationAtCurrentState") \
3833 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3834 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3836 if selfA._parameters["StoreInternalVariables"] \
3837 or selfA._toStore("CostFunctionJ") \
3838 or selfA._toStore("CostFunctionJb") \
3839 or selfA._toStore("CostFunctionJo") \
3840 or selfA._toStore("CurrentOptimum") \
3841 or selfA._toStore("APosterioriCovariance"):
3842 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3843 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3844 J = Jb + Jo
3845 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3846 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3847 selfA.StoredVariables["CostFunctionJ" ].store( J )
3849 if selfA._toStore("IndexOfOptimum") \
3850 or selfA._toStore("CurrentOptimum") \
3851 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3852 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3853 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3854 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3855 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3856 if selfA._toStore("IndexOfOptimum"):
3857 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3858 if selfA._toStore("CurrentOptimum"):
3859 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3860 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3861 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3862 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3863 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3864 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3865 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3866 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3867 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3868 if selfA._toStore("APosterioriCovariance"):
3869 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3870 if selfA._parameters["EstimationOf"] == "Parameters" \
3871 and J < previousJMinimum:
3872 previousJMinimum = J
3873 XaMin = Xa
3874 if selfA._toStore("APosterioriCovariance"):
3875 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3877 # Final additional storage of the optimum for parameter estimation
3878 # ----------------------------------------------------------------------
3879 if selfA._parameters["EstimationOf"] == "Parameters":
3880 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3881 selfA.StoredVariables["Analysis"].store( XaMin )
3882 if selfA._toStore("APosterioriCovariance"):
3883 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3884 if selfA._toStore("BMA"):
3885 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3887 return 0
3889 # ==============================================================================
3890 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3891 """
3892 Unscented Kalman Filter
3893 """
3894 if selfA._parameters["EstimationOf"] == "Parameters":
3895 selfA._parameters["StoreInternalVariables"] = True
3897 L = Xb.size
3898 Alpha = selfA._parameters["Alpha"]
3899 Beta = selfA._parameters["Beta"]
3900 if selfA._parameters["Kappa"] == 0:
3901 if selfA._parameters["EstimationOf"] == "State":
3902 Kappa = 0
3903 elif selfA._parameters["EstimationOf"] == "Parameters":
3904 Kappa = 3 - L
3905 else:
3906 Kappa = selfA._parameters["Kappa"]
3907 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3908 Gamma = math.sqrt( L + Lambda )
3910 Ww = []
3911 Ww.append( 0. )
3912 for i in range(2*L):
3913 Ww.append( 1. / (2.*(L + Lambda)) )
3915 Wm = numpy.array( Ww )
3916 Wm[0] = Lambda / (L + Lambda)
3917 Wc = numpy.array( Ww )
3918 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
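# Unscented transform weights: Wm weights the sigma-point means, Wc the
# covariances; the central point gets a specific weight and the 2*L other
# points share the same weight 1/(2*(L+Lambda)).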
3921 Hm = HO["Direct"].appliedControledFormTo
3923 if selfA._parameters["EstimationOf"] == "State":
3924 Mm = EM["Direct"].appliedControledFormTo
3926 if CM is not None and "Tangent" in CM and U is not None:
3927 Cm = CM["Tangent"].asMatrix(Xb)
3928 else:
3929 Cm = None
3931 # Observation duration and sizes
3932 if hasattr(Y,"stepnumber"):
3933 duration = Y.stepnumber()
3934 __p = numpy.cumprod(Y.shape())[-1]
3935 else:
3936 duration = 2
3937 __p = numpy.array(Y).size
3939 # Precompute the inversions of B and R
3940 if selfA._parameters["StoreInternalVariables"] \
3941 or selfA._toStore("CostFunctionJ") \
3942 or selfA._toStore("CostFunctionJb") \
3943 or selfA._toStore("CostFunctionJo") \
3944 or selfA._toStore("CurrentOptimum") \
3945 or selfA._toStore("APosterioriCovariance"):
3946 BI = B.getI()
3947 RI = R.getI()
3948 #
3949 __n = Xb.size
3951 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3952 Xn = Xb
3953 if hasattr(B,"asfullmatrix"):
3954 Pn = B.asfullmatrix(__n)
3955 else:
3956 Pn = B
3957 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3958 selfA.StoredVariables["Analysis"].store( Xb )
3959 if selfA._toStore("APosterioriCovariance"):
3960 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3961 elif selfA._parameters["nextStep"]:
3962 Xn = selfA._getInternalState("Xn")
3963 Pn = selfA._getInternalState("Pn")
3965 if selfA._parameters["EstimationOf"] == "Parameters":
3966 XaMin = Xn
3967 previousJMinimum = numpy.finfo(float).max
3969 for step in range(duration-1):
3970 if hasattr(Y,"store"):
3971 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3972 else:
3973 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3974 #
3975 if U is not None:
3976 if hasattr(U,"store") and len(U)>1:
3977 Un = numpy.ravel( U[step] ).reshape((-1,1))
3978 elif hasattr(U,"store") and len(U)==1:
3979 Un = numpy.ravel( U[0] ).reshape((-1,1))
3980 else:
3981 Un = numpy.ravel( U ).reshape((-1,1))
3982 else:
3983 Un = None
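# Sigma points: the current mean, plus and minus the columns of Gamma*sqrt(Pn)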
3985 Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
3986 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3987 nbSpts = 2*Xn.size+1
3989 XEtnnp = []
3990 for point in range(nbSpts):
3991 if selfA._parameters["EstimationOf"] == "State":
3992 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
3993 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
3994 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3995 XEtnnpi = XEtnnpi + Cm * Un
3996 elif selfA._parameters["EstimationOf"] == "Parameters":
3997 # --- > By principle, M = Id, Q = 0
3998 XEtnnpi = Xnp[:,point]
3999 XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4000 XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4002 Xncm = ( XEtnnp * Wm ).sum(axis=1)
4004 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4005 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4006 for point in range(nbSpts):
4007 Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4009 Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4011 Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4013 Ynnp = []
4014 for point in range(nbSpts):
4015 if selfA._parameters["EstimationOf"] == "State":
4016 Ynnpi = Hm( (Xnnp[:,point], None) )
4017 elif selfA._parameters["EstimationOf"] == "Parameters":
4018 Ynnpi = Hm( (Xnnp[:,point], Un) )
4019 Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4020 Ynnp = numpy.concatenate( Ynnp, axis=1 )
4022 Yncm = ( Ynnp * Wm ).sum(axis=1)
4024 Pyyn = R
4025 Pxyn = 0.
4026 for point in range(nbSpts):
4027 Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4028 Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4030 _Innovation = Ynpu - Yncm.reshape((-1,1))
4031 if selfA._parameters["EstimationOf"] == "Parameters":
4032 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
4033 _Innovation = _Innovation - Cm * Un
4035 Kn = Pxyn * Pyyn.I
4036 Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4037 Pn = Pnm - Kn * Pyyn * Kn.T
4038 Xa = Xn
4040 #--------------------------
4041 selfA._setInternalState("Xn", Xn)
4042 selfA._setInternalState("Pn", Pn)
4043 #--------------------------
4045 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4046 # ---> with analysis
4047 selfA.StoredVariables["Analysis"].store( Xa )
4048 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4049 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4050 if selfA._toStore("InnovationAtCurrentAnalysis"):
4051 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4052 # ---> with current state
4053 if selfA._parameters["StoreInternalVariables"] \
4054 or selfA._toStore("CurrentState"):
4055 selfA.StoredVariables["CurrentState"].store( Xn )
4056 if selfA._toStore("ForecastState"):
4057 selfA.StoredVariables["ForecastState"].store( Xncm )
4058 if selfA._toStore("ForecastCovariance"):
4059 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4060 if selfA._toStore("BMA"):
4061 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4062 if selfA._toStore("InnovationAtCurrentState"):
4063 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4064 if selfA._toStore("SimulatedObservationAtCurrentState") \
4065 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4066 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4068 if selfA._parameters["StoreInternalVariables"] \
4069 or selfA._toStore("CostFunctionJ") \
4070 or selfA._toStore("CostFunctionJb") \
4071 or selfA._toStore("CostFunctionJo") \
4072 or selfA._toStore("CurrentOptimum") \
4073 or selfA._toStore("APosterioriCovariance"):
4074 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4075 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4076 J = Jb + Jo
4077 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4078 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4079 selfA.StoredVariables["CostFunctionJ" ].store( J )
4081 if selfA._toStore("IndexOfOptimum") \
4082 or selfA._toStore("CurrentOptimum") \
4083 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4084 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4085 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4086 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4087 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4088 if selfA._toStore("IndexOfOptimum"):
4089 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4090 if selfA._toStore("CurrentOptimum"):
4091 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4092 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4093 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4094 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4095 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4096 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4097 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4098 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4099 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4100 if selfA._toStore("APosterioriCovariance"):
4101 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4102 if selfA._parameters["EstimationOf"] == "Parameters" \
4103 and J < previousJMinimum:
4104 previousJMinimum = J
4105 XaMin = Xa
4106 if selfA._toStore("APosterioriCovariance"):
4107 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4109 # Final additional storage of the optimum for parameter estimation
4110 # ----------------------------------------------------------------------
4111 if selfA._parameters["EstimationOf"] == "Parameters":
4112 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4113 selfA.StoredVariables["Analysis"].store( XaMin )
4114 if selfA._toStore("APosterioriCovariance"):
4115 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4116 if selfA._toStore("BMA"):
4117 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4119 return 0
4121 # ==============================================================================
4122 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4123 """
4124 3DVAR variational analysis with no inversion of B
4125 """
4126 #
4127 # Initializations
4128 # ---------------
4129 Hm = HO["Direct"].appliedTo
4130 Ha = HO["Adjoint"].appliedInXTo
4132 BT = B.getT()
4133 RI = R.getI()
4135 Xini = numpy.zeros(Xb.shape)
4137 # Definition of the cost function
4138 # ------------------------------
4139 def CostFunction(v):
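# Cost function in the control variable v, using the change of variable
# x = Xb + B v: the background term 0.5*(x-Xb)'B^-1(x-Xb) becomes 0.5*v'B v,
# so that no inversion of B is needed.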
4140 _V = numpy.asmatrix(numpy.ravel( v )).T
4141 _X = Xb + B * _V
4142 if selfA._parameters["StoreInternalVariables"] or \
4143 selfA._toStore("CurrentState") or \
4144 selfA._toStore("CurrentOptimum"):
4145 selfA.StoredVariables["CurrentState"].store( _X )
4146 _HX = Hm( _X )
4147 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4148 _Innovation = Y - _HX
4149 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4150 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4151 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4152 if selfA._toStore("InnovationAtCurrentState"):
4153 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4155 Jb = float( 0.5 * _V.T * BT * _V )
4156 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4157 J = Jb + Jo
4159 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4160 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4161 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4162 selfA.StoredVariables["CostFunctionJ" ].store( J )
4163 if selfA._toStore("IndexOfOptimum") or \
4164 selfA._toStore("CurrentOptimum") or \
4165 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4166 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4167 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4168 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4169 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4170 if selfA._toStore("IndexOfOptimum"):
4171 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4172 if selfA._toStore("CurrentOptimum"):
4173 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4174 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4175 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4176 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4177 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4178 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4179 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4180 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4181 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4182 return J
4184 def GradientOfCostFunction(v):
4185 _V = v.reshape((-1,1))
4186 _X = Xb + (B @ _V).reshape((-1,1))
4187 _HX = Hm( _X ).reshape((-1,1))
4188 GradJb = BT * _V
4189 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4190 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4191 return GradJ
4193 # Minimization of the cost functional
4194 # --------------------------------
4195 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4197 if selfA._parameters["Minimizer"] == "LBFGSB":
4198 if "0.19" <= scipy.version.version <= "1.1.0":
4199 import lbfgsbhlt as optimiseur
4200 else:
4201 import scipy.optimize as optimiseur
4202 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4203 func = CostFunction,
4204 x0 = Xini,
4205 fprime = GradientOfCostFunction,
4206 args = (),
4207 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4208 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4209 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4210 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4211 iprint = selfA._parameters["optiprint"],
4212 )
4213 nfeval = Informations['funcalls']
4214 rc = Informations['warnflag']
4215 elif selfA._parameters["Minimizer"] == "TNC":
4216 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4217 func = CostFunction,
4218 x0 = Xini,
4219 fprime = GradientOfCostFunction,
4220 args = (),
4221 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4222 maxfun = selfA._parameters["MaximumNumberOfSteps"],
4223 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4224 ftol = selfA._parameters["CostDecrementTolerance"],
4225 messages = selfA._parameters["optmessages"],
4226 )
4227 elif selfA._parameters["Minimizer"] == "CG":
4228 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4229 f = CostFunction,
4230 x0 = Xini,
4231 fprime = GradientOfCostFunction,
4232 args = (),
4233 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4234 gtol = selfA._parameters["GradientNormTolerance"],
4235 disp = selfA._parameters["optdisp"],
4236 full_output = True,
4237 )
4238 elif selfA._parameters["Minimizer"] == "NCG":
4239 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4240 f = CostFunction,
4241 x0 = Xini,
4242 fprime = GradientOfCostFunction,
4243 args = (),
4244 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4245 avextol = selfA._parameters["CostDecrementTolerance"],
4246 disp = selfA._parameters["optdisp"],
4247 full_output = True,
4248 )
4249 elif selfA._parameters["Minimizer"] == "BFGS":
4250 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4251 f = CostFunction,
4252 x0 = Xini,
4253 fprime = GradientOfCostFunction,
4254 args = (),
4255 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4256 gtol = selfA._parameters["GradientNormTolerance"],
4257 disp = selfA._parameters["optdisp"],
4258 full_output = True,
4259 )
4260 else:
4261 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4263 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4264 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4266 # Correction to work around a TNC bug on the returned Minimum
4267 # ----------------------------------------------------------------
4268 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4269 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4270 else:
4271 Minimum = Xb + B * Minimum.reshape((-1,1)) # Deliberately * and not @, to stay compatible with covariance objects
4272 #
4273 Xa = Minimum
4274 #--------------------------
4276 selfA.StoredVariables["Analysis"].store( Xa )
4278 if selfA._toStore("OMA") or \
4279 selfA._toStore("SigmaObs2") or \
4280 selfA._toStore("SimulationQuantiles") or \
4281 selfA._toStore("SimulatedObservationAtOptimum"):
4282 if selfA._toStore("SimulatedObservationAtCurrentState"):
4283 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4284 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4285 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4286 else:
4287 HXa = Hm( Xa )
4289 if selfA._toStore("APosterioriCovariance") or \
4290 selfA._toStore("SimulationQuantiles") or \
4291 selfA._toStore("JacobianMatrixAtOptimum") or \
4292 selfA._toStore("KalmanGainAtOptimum"):
4293 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4294 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4295 if selfA._toStore("APosterioriCovariance") or \
4296 selfA._toStore("SimulationQuantiles") or \
4297 selfA._toStore("KalmanGainAtOptimum"):
4298 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4299 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4300 if selfA._toStore("APosterioriCovariance") or \
4301 selfA._toStore("SimulationQuantiles"):
4302 BI = B.getI()
4303 A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4304 if selfA._toStore("APosterioriCovariance"):
4305 selfA.StoredVariables["APosterioriCovariance"].store( A )
4306 if selfA._toStore("JacobianMatrixAtOptimum"):
4307 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4308 if selfA._toStore("KalmanGainAtOptimum"):
4309 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4310 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4311 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4313 # Additional computations and/or storage
4314 # ---------------------------------------
4315 if selfA._toStore("Innovation") or \
4316 selfA._toStore("SigmaObs2") or \
4317 selfA._toStore("MahalanobisConsistency") or \
4318 selfA._toStore("OMB"):
4319 d = Y - HXb
4320 if selfA._toStore("Innovation"):
4321 selfA.StoredVariables["Innovation"].store( d )
4322 if selfA._toStore("BMA"):
4323 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4324 if selfA._toStore("OMA"):
4325 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4326 if selfA._toStore("OMB"):
4327 selfA.StoredVariables["OMB"].store( d )
4328 if selfA._toStore("SigmaObs2"):
4329 TraceR = R.trace(Y.size)
4330 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
4331 if selfA._toStore("MahalanobisConsistency"):
4332 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4333 if selfA._toStore("SimulationQuantiles"):
4334 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4335 if selfA._toStore("SimulatedObservationAtBackground"):
4336 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4337 if selfA._toStore("SimulatedObservationAtOptimum"):
4338 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4340 return 0
4342 # ==============================================================================
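# Illustrative, self-contained sketch (not called by the algorithms above):
# a single linear Kalman analysis step, Xa = Xb + K d with the gain
# K = B H' (H B H' + R)^-1, written with plain numpy arrays. All names are
# local to this example.
def _ExampleLinearKalmanAnalysisStep( Xb, Y, H, B, R ):
    Xb = numpy.ravel( Xb ).reshape((-1,1)) # Background state as column vector
    Y  = numpy.ravel( Y ).reshape((-1,1))  # Observations as column vector
    K  = B @ H.T @ numpy.linalg.inv( H @ B @ H.T + R ) # Kalman gain
    return Xb + K @ (Y - H @ Xb)           # Analysis state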
4343 if __name__ == "__main__":
4344 print('\n AUTODIAGNOSTIC\n')