1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les objets numériques génériques.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.ravel( X ).reshape((-1,1))
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
51 # ==============================================================================
52 class FDApproximation(object):
54 Cette classe sert d'interface pour définir les opérateurs approximés. A la
55 création d'un objet, en fournissant une fonction "Function", on obtient un
56 objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57 "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58 multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59 "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60 centrées si le booléen "centeredDF" est vrai.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.ravel( dX )
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
170 Calcul du direct à l'aide de la fonction fournie.
172 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173 ne doivent pas être données ici à la fonction utilisateur.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
183 # ---------------------------------------------------------
184 def TangentMatrix(self, X ):
186 Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
187 c'est-à-dire le gradient de H en X. On utilise des différences finies
188 directionnelles autour du point X. X est un numpy.matrix.
190 Différences finies centrées (approximation d'ordre 2):
191 1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
192 dX[i] à la composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
193 on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
195 2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
197 3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
199 Différences finies non centrées (approximation d'ordre 1):
200 1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
201 composante X[i] pour composer X_plus_dXi, et on calcule la réponse
202 HX_plus_dXi = H( X_plus_dXi )
203 2/ On calcule la valeur centrale HX = H(X)
204 3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
206 4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
209 logging.debug("FDA Début du calcul de la Jacobienne")
210 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
211 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
213 if X is None or len(X)==0:
214 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
216 _X = numpy.ravel( X )
218 if self.__dX is None:
219 _dX = self.__increment * _X
221 _dX = numpy.ravel( self.__dX )
222 assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
223 assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
225 if (_dX == 0.).any():
228 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
230 _dX = numpy.where( _dX == 0., moyenne, _dX )
232 __alreadyCalculated = False
234 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
235 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
236 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
237 __alreadyCalculated, __i = True, __alreadyCalculatedP
238 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
240 if __alreadyCalculated:
241 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
242 _Jacobienne = self.__listJPCR[__i]
244 logging.debug("FDA Calcul Jacobienne (explicite)")
245 if self.__centeredDF:
247 if self.__mpEnabled and not self.__mfEnabled:
249 "__userFunction__path" : self.__userFunction__path,
250 "__userFunction__modl" : self.__userFunction__modl,
251 "__userFunction__name" : self.__userFunction__name,
254 for i in range( len(_dX) ):
256 _X_plus_dXi = numpy.array( _X, dtype=float )
257 _X_plus_dXi[i] = _X[i] + _dXi
258 _X_moins_dXi = numpy.array( _X, dtype=float )
259 _X_moins_dXi[i] = _X[i] - _dXi
261 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
262 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
264 import multiprocessing
265 self.__pool = multiprocessing.Pool(self.__mpWorkers)
266 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
271 for i in range( len(_dX) ):
272 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
274 elif self.__mfEnabled:
276 for i in range( len(_dX) ):
278 _X_plus_dXi = numpy.array( _X, dtype=float )
279 _X_plus_dXi[i] = _X[i] + _dXi
280 _X_moins_dXi = numpy.array( _X, dtype=float )
281 _X_moins_dXi[i] = _X[i] - _dXi
283 _xserie.append( _X_plus_dXi )
284 _xserie.append( _X_moins_dXi )
286 _HX_plusmoins_dX = self.DirectOperator( _xserie )
289 for i in range( len(_dX) ):
290 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
294 for i in range( _dX.size ):
296 _X_plus_dXi = numpy.array( _X, dtype=float )
297 _X_plus_dXi[i] = _X[i] + _dXi
298 _X_moins_dXi = numpy.array( _X, dtype=float )
299 _X_moins_dXi[i] = _X[i] - _dXi
301 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
302 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
304 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
308 if self.__mpEnabled and not self.__mfEnabled:
310 "__userFunction__path" : self.__userFunction__path,
311 "__userFunction__modl" : self.__userFunction__modl,
312 "__userFunction__name" : self.__userFunction__name,
315 _jobs.append( (_X, self.__extraArgs, funcrepr) )
316 for i in range( len(_dX) ):
317 _X_plus_dXi = numpy.array( _X, dtype=float )
318 _X_plus_dXi[i] = _X[i] + _dX[i]
320 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
322 import multiprocessing
323 self.__pool = multiprocessing.Pool(self.__mpWorkers)
324 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
328 _HX = _HX_plus_dX.pop(0)
331 for i in range( len(_dX) ):
332 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
334 elif self.__mfEnabled:
337 for i in range( len(_dX) ):
338 _X_plus_dXi = numpy.array( _X, dtype=float )
339 _X_plus_dXi[i] = _X[i] + _dX[i]
341 _xserie.append( _X_plus_dXi )
343 _HX_plus_dX = self.DirectOperator( _xserie )
345 _HX = _HX_plus_dX.pop(0)
348 for i in range( len(_dX) ):
349 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
353 _HX = self.DirectOperator( _X )
354 for i in range( _dX.size ):
356 _X_plus_dXi = numpy.array( _X, dtype=float )
357 _X_plus_dXi[i] = _X[i] + _dXi
359 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
361 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
364 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
366 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
367 while len(self.__listJPCP) > self.__lenghtRJ:
368 self.__listJPCP.pop(0)
369 self.__listJPCI.pop(0)
370 self.__listJPCR.pop(0)
371 self.__listJPPN.pop(0)
372 self.__listJPIN.pop(0)
373 self.__listJPCP.append( copy.copy(_X) )
374 self.__listJPCI.append( copy.copy(_dX) )
375 self.__listJPCR.append( copy.copy(_Jacobienne) )
376 self.__listJPPN.append( numpy.linalg.norm(_X) )
377 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
379 logging.debug("FDA Fin du calcul de la Jacobienne")
383 # ---------------------------------------------------------
384 def TangentOperator(self, paire, **extraArgs ):
386 Calcul du tangent à l'aide de la Jacobienne.
388 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
389 ne doivent pas être données ici à la fonction utilisateur.
392 assert len(paire) == 1, "Incorrect lenght of arguments"
394 assert len(_paire) == 2, "Incorrect number of arguments"
396 assert len(paire) == 2, "Incorrect number of arguments"
399 _Jacobienne = self.TangentMatrix( X )
400 if dX is None or len(dX) == 0:
402 # Calcul de la forme matricielle si le second argument est None
403 # -------------------------------------------------------------
404 if self.__mfEnabled: return [_Jacobienne,]
405 else: return _Jacobienne
408 # Calcul de la valeur linéarisée de H en X appliqué à dX
409 # ------------------------------------------------------
410 _dX = numpy.ravel( dX )
411 _HtX = numpy.dot(_Jacobienne, _dX)
412 if self.__mfEnabled: return [_HtX,]
415 # ---------------------------------------------------------
416 def AdjointOperator(self, paire, **extraArgs ):
418 Calcul de l'adjoint à l'aide de la Jacobienne.
420 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
421 ne doivent pas être données ici à la fonction utilisateur.
424 assert len(paire) == 1, "Incorrect lenght of arguments"
426 assert len(_paire) == 2, "Incorrect number of arguments"
428 assert len(paire) == 2, "Incorrect number of arguments"
431 _JacobienneT = self.TangentMatrix( X ).T
432 if Y is None or len(Y) == 0:
434 # Calcul de la forme matricielle si le second argument est None
435 # -------------------------------------------------------------
436 if self.__mfEnabled: return [_JacobienneT,]
437 else: return _JacobienneT
440 # Calcul de la valeur de l'adjoint en X appliqué à Y
441 # --------------------------------------------------
442 _Y = numpy.ravel( Y )
443 _HaY = numpy.dot(_JacobienneT, _Y)
444 if self.__mfEnabled: return [_HaY,]
447 # ==============================================================================
448 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
449 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
451 _bgcenter = numpy.ravel(_bgcenter)[:,None]
453 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
455 if _bgcovariance is None:
456 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
458 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
459 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
461 return BackgroundEnsemble
463 # ==============================================================================
464 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
465 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
466 def __CenteredRandomAnomalies(Zr, N):
468 Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
469 notes manuscrites de MB et conforme au code de PS avec eps = -1
472 Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
473 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
474 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
479 _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
481 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
482 if _bgcovariance is None:
483 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
486 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
487 _nbctl = _bgcenter.size
488 if _nbmembers > _nbctl:
489 _Z = numpy.concatenate((numpy.dot(
490 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
491 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
493 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
494 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
495 BackgroundEnsemble = _bgcenter + _Zca
497 if max(abs(_bgcovariance.flatten())) > 0:
498 _nbctl = _bgcenter.size
499 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
500 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
501 BackgroundEnsemble = _bgcenter + _Zca
503 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
505 return BackgroundEnsemble
507 # ==============================================================================
508 def EnsembleMean( __Ensemble ):
509 "Renvoie la moyenne empirique d'un ensemble"
510 return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
512 # ==============================================================================
513 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
514 "Renvoie les anomalies centrées à partir d'un ensemble"
515 if __OptMean is None:
516 __Em = EnsembleMean( __Ensemble )
518 __Em = numpy.ravel( __OptMean ).reshape((-1,1))
520 return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
522 # ==============================================================================
523 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
524 "Renvoie l'estimation empirique de la covariance d'ensemble"
526 # Covariance rapide mais rarement définie positive
527 __Covariance = numpy.cov( __Ensemble )
529 # Résultat souvent identique à numpy.cov, mais plus robuste
530 __n, __m = numpy.asarray( __Ensemble ).shape
531 __Anomalies = EnsembleOfAnomalies( __Ensemble )
532 # Estimation empirique
533 __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
535 __Covariance = ( __Covariance + __Covariance.T ) * 0.5
536 # Assure la positivité
537 __epsilon = mpr*numpy.trace( __Covariance )
538 __Covariance = __Covariance + __epsilon * numpy.identity(__n)
542 # ==============================================================================
543 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
544 "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
545 if hasattr(__Covariance,"assparsematrix"):
546 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
547 # Traitement d'une covariance nulle ou presque
549 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
550 # Traitement d'une covariance nulle ou presque
553 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
554 # Traitement d'une covariance nulle ou presque
556 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
557 # Traitement d'une covariance nulle ou presque
560 __n, __m = __Ensemble.shape
561 if __Seed is not None: numpy.random.seed(__Seed)
563 if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
564 # Traitement d'une covariance multiple de l'identité
566 __std = numpy.sqrt(__Covariance.assparsematrix())
567 __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
569 elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
570 # Traitement d'une covariance diagonale avec variances non identiques
571 __zero = numpy.zeros(__n)
572 __std = numpy.sqrt(__Covariance.assparsematrix())
573 __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
575 elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
576 # Traitement d'une covariance pleine
577 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
579 elif isinstance(__Covariance, numpy.ndarray):
580 # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
581 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
584 raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
588 # ==============================================================================
589 def CovarianceInflation(
591 InflationType = None,
592 InflationFactor = None,
593 BackgroundCov = None,
596 Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
598 Synthèse : Hunt 2007, section 2.3.5
600 if InflationFactor is None:
603 InflationFactor = float(InflationFactor)
605 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
606 if InflationFactor < 1.:
607 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
608 if InflationFactor < 1.+mpr:
610 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
612 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
613 if InflationFactor < 1.:
614 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
615 if InflationFactor < 1.+mpr:
617 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
618 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
619 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
621 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
622 if InflationFactor < 0.:
623 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
624 if InflationFactor < mpr:
626 __n, __m = numpy.asarray(InputCovOrEns).shape
628 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
629 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
631 elif InflationType == "HybridOnBackgroundCovariance":
632 if InflationFactor < 0.:
633 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
634 if InflationFactor < mpr:
636 __n, __m = numpy.asarray(InputCovOrEns).shape
638 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
639 if BackgroundCov is None:
640 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
641 if InputCovOrEns.shape != BackgroundCov.shape:
642 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
643 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
645 elif InflationType == "Relaxation":
646 raise NotImplementedError("InflationType Relaxation")
649 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
651 return OutputCovOrEns
653 # ==============================================================================
654 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
655 "Estimation des quantiles a posteriori (selfA est modifié)"
656 nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
658 # Traitement des bornes
659 if "StateBoundsForQuantiles" in selfA._parameters:
660 LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
661 elif "Bounds" in selfA._parameters:
662 LBounds = selfA._parameters["Bounds"] # Défaut raisonnable
665 if LBounds is not None:
666 LBounds = ForceNumericBounds( LBounds )
667 _Xa = numpy.ravel(Xa)
669 # Échantillonnage des états
672 for i in range(nbsamples):
673 if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
674 dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
675 if LBounds is not None: # "EstimateProjection" par défaut
676 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
677 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
679 Yr = HXa.reshape((-1,1)) + dYr
680 if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
681 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
682 Xr = numpy.random.multivariate_normal(_Xa,A)
683 if LBounds is not None: # "EstimateProjection" par défaut
684 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
685 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
688 raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
691 YfQ = Yr.reshape((-1,1))
692 if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
694 YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
695 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
697 # Extraction des quantiles
700 for quantile in selfA._parameters["Quantiles"]:
701 if not (0. <= float(quantile) <= 1.): continue
702 indice = int(nbsamples * float(quantile) - 1./nbsamples)
703 if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
704 else: YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
705 if YQ is not None: # Liste non vide de quantiles
706 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
707 if selfA._toStore("SampledStateForQuantiles"):
708 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
712 # ==============================================================================
713 def ForceNumericBounds( __Bounds ):
714 "Force les bornes à être des valeurs numériques, sauf si globalement None"
715 # Conserve une valeur par défaut à None s'il n'y a pas de bornes
716 if __Bounds is None: return None
717 # Converti toutes les bornes individuelles None à +/- l'infini
718 __Bounds = numpy.asarray( __Bounds, dtype=float )
719 if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
720 raise ValueError("Incorrectly shaped bounds data")
721 __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
722 __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max
725 # ==============================================================================
726 def RecentredBounds( __Bounds, __Center):
727 "Recentre les bornes autour de 0, sauf si globalement None"
728 # Conserve une valeur par défaut à None s'il n'y a pas de bornes
729 if __Bounds is None: return None
730 # Recentre les valeurs numériques de bornes
731 return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1))
733 # ==============================================================================
734 def ApplyBounds( __Vector, __Bounds, __newClip = True):
735 "Applique des bornes numériques à un point"
736 # Conserve une valeur par défaut s'il n'y a pas de bornes
737 if __Bounds is None: return __Vector
739 if not isinstance(__Vector, numpy.ndarray): # Is an array
740 raise ValueError("Incorrect array definition of vector data")
741 if not isinstance(__Bounds, numpy.ndarray): # Is an array
742 raise ValueError("Incorrect array definition of bounds data")
743 if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght
744 raise ValueError("Incorrect bounds number to be applied for this vector")
745 if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
746 raise ValueError("Incorrectly shaped bounds data")
749 __Vector = __Vector.clip(
750 __Bounds[:,0].reshape(__Vector.shape),
751 __Bounds[:,1].reshape(__Vector.shape),
754 __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
755 __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
756 __Vector = numpy.asarray(__Vector)
760 # ==============================================================================
761 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
763 Contrained Extended Kalman Filter
765 if selfA._parameters["EstimationOf"] == "Parameters":
766 selfA._parameters["StoreInternalVariables"] = True
767 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
770 H = HO["Direct"].appliedControledFormTo
772 if selfA._parameters["EstimationOf"] == "State":
773 M = EM["Direct"].appliedControledFormTo
775 if CM is not None and "Tangent" in CM and U is not None:
776 Cm = CM["Tangent"].asMatrix(Xb)
780 # Durée d'observation et tailles
781 if hasattr(Y,"stepnumber"):
782 duration = Y.stepnumber()
783 __p = numpy.cumprod(Y.shape())[-1]
786 __p = numpy.array(Y).size
788 # Précalcul des inversions de B et R
789 if selfA._parameters["StoreInternalVariables"] \
790 or selfA._toStore("CostFunctionJ") \
791 or selfA._toStore("CostFunctionJb") \
792 or selfA._toStore("CostFunctionJo") \
793 or selfA._toStore("CurrentOptimum") \
794 or selfA._toStore("APosterioriCovariance"):
800 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
803 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
804 selfA.StoredVariables["Analysis"].store( Xb )
805 if selfA._toStore("APosterioriCovariance"):
806 if hasattr(B,"asfullmatrix"):
807 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
809 selfA.StoredVariables["APosterioriCovariance"].store( B )
810 selfA._setInternalState("seed", numpy.random.get_state())
811 elif selfA._parameters["nextStep"]:
812 Xn = selfA._getInternalState("Xn")
813 Pn = selfA._getInternalState("Pn")
815 if selfA._parameters["EstimationOf"] == "Parameters":
817 previousJMinimum = numpy.finfo(float).max
819 for step in range(duration-1):
820 if hasattr(Y,"store"):
821 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
823 Ynpu = numpy.ravel( Y ).reshape((__p,1))
825 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
826 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
827 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
828 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
830 if selfA._parameters["EstimationOf"] == "State":
831 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
832 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
833 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
834 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
837 if hasattr(U,"store") and len(U)>1:
838 Un = numpy.ravel( U[step] ).reshape((-1,1))
839 elif hasattr(U,"store") and len(U)==1:
840 Un = numpy.ravel( U[0] ).reshape((-1,1))
842 Un = numpy.ravel( U ).reshape((-1,1))
846 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
847 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
849 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
850 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
851 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
852 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
853 Xn_predicted = Xn_predicted + Cm * Un
854 Pn_predicted = Q + Mt * (Pn * Ma)
855 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
856 # --- > Par principe, M = Id, Q = 0
860 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
861 Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
863 if selfA._parameters["EstimationOf"] == "State":
864 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
865 _Innovation = Ynpu - HX_predicted
866 elif selfA._parameters["EstimationOf"] == "Parameters":
867 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
868 _Innovation = Ynpu - HX_predicted
869 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
870 _Innovation = _Innovation - Cm * Un
872 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
873 Xn = Xn_predicted + Kn * _Innovation
874 Pn = Pn_predicted - Kn * Ht * Pn_predicted
876 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
877 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
880 #--------------------------
881 selfA._setInternalState("Xn", Xn)
882 selfA._setInternalState("Pn", Pn)
883 #--------------------------
885 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
887 selfA.StoredVariables["Analysis"].store( Xa )
888 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
889 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
890 if selfA._toStore("InnovationAtCurrentAnalysis"):
891 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
892 # ---> avec current state
893 if selfA._parameters["StoreInternalVariables"] \
894 or selfA._toStore("CurrentState"):
895 selfA.StoredVariables["CurrentState"].store( Xn )
896 if selfA._toStore("ForecastState"):
897 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
898 if selfA._toStore("ForecastCovariance"):
899 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
900 if selfA._toStore("BMA"):
901 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
902 if selfA._toStore("InnovationAtCurrentState"):
903 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
904 if selfA._toStore("SimulatedObservationAtCurrentState") \
905 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
906 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
908 if selfA._parameters["StoreInternalVariables"] \
909 or selfA._toStore("CostFunctionJ") \
910 or selfA._toStore("CostFunctionJb") \
911 or selfA._toStore("CostFunctionJo") \
912 or selfA._toStore("CurrentOptimum") \
913 or selfA._toStore("APosterioriCovariance"):
914 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
915 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
917 selfA.StoredVariables["CostFunctionJb"].store( Jb )
918 selfA.StoredVariables["CostFunctionJo"].store( Jo )
919 selfA.StoredVariables["CostFunctionJ" ].store( J )
921 if selfA._toStore("IndexOfOptimum") \
922 or selfA._toStore("CurrentOptimum") \
923 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
924 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
925 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
926 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
927 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
928 if selfA._toStore("IndexOfOptimum"):
929 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
930 if selfA._toStore("CurrentOptimum"):
931 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
932 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
933 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
934 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
935 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
936 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
937 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
938 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
939 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
940 if selfA._toStore("APosterioriCovariance"):
941 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
942 if selfA._parameters["EstimationOf"] == "Parameters" \
943 and J < previousJMinimum:
946 if selfA._toStore("APosterioriCovariance"):
947 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
949 # Stockage final supplémentaire de l'optimum en estimation de paramètres
950 # ----------------------------------------------------------------------
951 if selfA._parameters["EstimationOf"] == "Parameters":
952 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
953 selfA.StoredVariables["Analysis"].store( XaMin )
954 if selfA._toStore("APosterioriCovariance"):
955 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
956 if selfA._toStore("BMA"):
957 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
961 # ==============================================================================
962 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
968 H = HO["Direct"].appliedControledFormTo
970 if selfA._parameters["EstimationOf"] == "State":
971 M = EM["Direct"].appliedControledFormTo
973 if CM is not None and "Tangent" in CM and U is not None:
974 Cm = CM["Tangent"].asMatrix(Xb)
978 # Précalcul des inversions de B et R
981 # Durée d'observation et tailles
982 LagL = selfA._parameters["SmootherLagL"]
983 if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
984 raise ValueError("Fixed-lag smoother requires a series of observation")
985 if Y.stepnumber() < LagL:
986 raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
987 duration = Y.stepnumber()
988 __p = numpy.cumprod(Y.shape())[-1]
990 __m = selfA._parameters["NumberOfMembers"]
992 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
993 selfA.StoredVariables["Analysis"].store( Xb )
994 if selfA._toStore("APosterioriCovariance"):
995 if hasattr(B,"asfullmatrix"):
996 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
998 selfA.StoredVariables["APosterioriCovariance"].store( B )
1000 # Calcul direct initial (on privilégie la mémorisation au recalcul)
1001 __seed = numpy.random.get_state()
1002 selfB = copy.deepcopy(selfA)
1003 selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1004 if VariantM == "EnKS16-KalmanFilterFormula":
1005 etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1007 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1009 EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1011 EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1012 selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1014 for step in range(LagL,duration-1):
1016 sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1019 if hasattr(Y,"store"):
1020 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1022 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1025 if hasattr(U,"store") and len(U)>1:
1026 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1027 elif hasattr(U,"store") and len(U)==1:
1028 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1030 Un = numpy.asmatrix(numpy.ravel( U )).T
1034 #--------------------------
1035 if VariantM == "EnKS16-KalmanFilterFormula":
1036 if selfA._parameters["EstimationOf"] == "State": # Forecast
1037 EL = M( [(EL[:,i], Un) for i in range(__m)],
1039 returnSerieAsArrayMatrix = True )
1040 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1041 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1043 returnSerieAsArrayMatrix = True )
1044 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1045 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1047 elif selfA._parameters["EstimationOf"] == "Parameters":
1048 # --- > Par principe, M = Id, Q = 0
1049 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1051 returnSerieAsArrayMatrix = True )
1053 vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1054 vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1056 mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1057 mS = mS.reshape((-1,__m)) # Pour dimension 1
1058 delta = RIdemi @ ( Ynpu - vZm )
1059 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1060 vw = mT @ mS.T @ delta
1062 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1063 mU = numpy.identity(__m)
1064 wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1066 EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1070 for irl in range(LagL): # Lissage des L précédentes analysis
1071 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1072 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1073 sEL[irl] = vEm + EX @ wTU
1075 # Conservation de l'analyse retrospective d'ordre 0 avant rotation
1076 Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1077 if selfA._toStore("APosterioriCovariance"):
1080 for irl in range(LagL):
1081 sEL[irl] = sEL[irl+1]
1083 #--------------------------
1085 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1087 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1088 # ---> avec analysis
1089 selfA.StoredVariables["Analysis"].store( Xa )
1090 if selfA._toStore("APosterioriCovariance"):
1091 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1093 # Stockage des dernières analyses incomplètement remises à jour
1094 for irl in range(LagL):
1095 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1096 Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1097 selfA.StoredVariables["Analysis"].store( Xa )
1101 # ==============================================================================
1102 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1104 Ensemble-Transform EnKF
1106 if selfA._parameters["EstimationOf"] == "Parameters":
1107 selfA._parameters["StoreInternalVariables"] = True
1110 H = HO["Direct"].appliedControledFormTo
1112 if selfA._parameters["EstimationOf"] == "State":
1113 M = EM["Direct"].appliedControledFormTo
1115 if CM is not None and "Tangent" in CM and U is not None:
1116 Cm = CM["Tangent"].asMatrix(Xb)
1120 # Durée d'observation et tailles
1121 if hasattr(Y,"stepnumber"):
1122 duration = Y.stepnumber()
1123 __p = numpy.cumprod(Y.shape())[-1]
1126 __p = numpy.array(Y).size
1128 # Précalcul des inversions de B et R
1129 if selfA._parameters["StoreInternalVariables"] \
1130 or selfA._toStore("CostFunctionJ") \
1131 or selfA._toStore("CostFunctionJb") \
1132 or selfA._toStore("CostFunctionJo") \
1133 or selfA._toStore("CurrentOptimum") \
1134 or selfA._toStore("APosterioriCovariance"):
1137 elif VariantM != "KalmanFilterFormula":
1139 if VariantM == "KalmanFilterFormula":
1143 __m = selfA._parameters["NumberOfMembers"]
1145 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1146 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1147 selfA.StoredVariables["Analysis"].store( Xb )
1148 if selfA._toStore("APosterioriCovariance"):
1149 if hasattr(B,"asfullmatrix"):
1150 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1152 selfA.StoredVariables["APosterioriCovariance"].store( B )
1153 selfA._setInternalState("seed", numpy.random.get_state())
1154 elif selfA._parameters["nextStep"]:
1155 Xn = selfA._getInternalState("Xn")
1157 previousJMinimum = numpy.finfo(float).max
1159 for step in range(duration-1):
1160 numpy.random.set_state(selfA._getInternalState("seed"))
1161 if hasattr(Y,"store"):
1162 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1164 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1167 if hasattr(U,"store") and len(U)>1:
1168 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1169 elif hasattr(U,"store") and len(U)==1:
1170 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1172 Un = numpy.asmatrix(numpy.ravel( U )).T
1176 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1177 Xn = CovarianceInflation( Xn,
1178 selfA._parameters["InflationType"],
1179 selfA._parameters["InflationFactor"],
1182 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1183 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1185 returnSerieAsArrayMatrix = True )
1186 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1187 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1189 returnSerieAsArrayMatrix = True )
1190 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1191 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1192 Xn_predicted = Xn_predicted + Cm * Un
1193 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1194 # --- > Par principe, M = Id, Q = 0
1195 Xn_predicted = EMX = Xn
1196 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1198 returnSerieAsArrayMatrix = True )
1200 # Mean of forecast and observation of forecast
1201 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1202 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1205 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
1206 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
1208 #--------------------------
1209 if VariantM == "KalmanFilterFormula":
1210 mS = RIdemi * EaHX / math.sqrt(__m-1)
1211 mS = mS.reshape((-1,__m)) # Pour dimension 1
1212 delta = RIdemi * ( Ynpu - Hfm )
1213 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1214 vw = mT @ mS.T @ delta
1216 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1217 mU = numpy.identity(__m)
1219 EaX = EaX / math.sqrt(__m-1)
1220 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1221 #--------------------------
1222 elif VariantM == "Variational":
1223 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1224 def CostFunction(w):
1225 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1226 _Jo = 0.5 * _A.T @ (RI * _A)
1227 _Jb = 0.5 * (__m-1) * w.T @ w
1230 def GradientOfCostFunction(w):
1231 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1232 _GardJo = - EaHX.T @ (RI * _A)
1233 _GradJb = (__m-1) * w.reshape((__m,1))
1234 _GradJ = _GardJo + _GradJb
1235 return numpy.ravel(_GradJ)
1236 vw = scipy.optimize.fmin_cg(
1238 x0 = numpy.zeros(__m),
1239 fprime = GradientOfCostFunction,
1244 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1245 Htb = (__m-1) * numpy.identity(__m)
1248 Pta = numpy.linalg.inv( Hta )
1249 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1251 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1252 #--------------------------
1253 elif VariantM == "FiniteSize11": # Jauge Boc2011
1254 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1255 def CostFunction(w):
1256 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1257 _Jo = 0.5 * _A.T @ (RI * _A)
1258 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1261 def GradientOfCostFunction(w):
1262 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1263 _GardJo = - EaHX.T @ (RI * _A)
1264 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1265 _GradJ = _GardJo + _GradJb
1266 return numpy.ravel(_GradJ)
1267 vw = scipy.optimize.fmin_cg(
1269 x0 = numpy.zeros(__m),
1270 fprime = GradientOfCostFunction,
1275 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1277 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1278 / (1 + 1/__m + vw.T @ vw)**2
1281 Pta = numpy.linalg.inv( Hta )
1282 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1284 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1285 #--------------------------
1286 elif VariantM == "FiniteSize15": # Jauge Boc2015
1287 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1288 def CostFunction(w):
1289 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1290 _Jo = 0.5 * _A.T * RI * _A
1291 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1294 def GradientOfCostFunction(w):
1295 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1296 _GardJo = - EaHX.T @ (RI * _A)
1297 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1298 _GradJ = _GardJo + _GradJb
1299 return numpy.ravel(_GradJ)
1300 vw = scipy.optimize.fmin_cg(
1302 x0 = numpy.zeros(__m),
1303 fprime = GradientOfCostFunction,
1308 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1310 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1311 / (1 + 1/__m + vw.T @ vw)**2
1314 Pta = numpy.linalg.inv( Hta )
1315 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1317 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1318 #--------------------------
1319 elif VariantM == "FiniteSize16": # Jauge Boc2016
1320 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1321 def CostFunction(w):
1322 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1323 _Jo = 0.5 * _A.T @ (RI * _A)
1324 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1327 def GradientOfCostFunction(w):
1328 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1329 _GardJo = - EaHX.T @ (RI * _A)
1330 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1331 _GradJ = _GardJo + _GradJb
1332 return numpy.ravel(_GradJ)
1333 vw = scipy.optimize.fmin_cg(
1335 x0 = numpy.zeros(__m),
1336 fprime = GradientOfCostFunction,
1341 Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1342 Htb = ((__m+1) / (__m-1)) * \
1343 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1344 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1347 Pta = numpy.linalg.inv( Hta )
1348 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1350 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1351 #--------------------------
1353 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1355 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1356 Xn = CovarianceInflation( Xn,
1357 selfA._parameters["InflationType"],
1358 selfA._parameters["InflationFactor"],
1361 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1362 #--------------------------
1363 selfA._setInternalState("Xn", Xn)
1364 selfA._setInternalState("seed", numpy.random.get_state())
1365 #--------------------------
1367 if selfA._parameters["StoreInternalVariables"] \
1368 or selfA._toStore("CostFunctionJ") \
1369 or selfA._toStore("CostFunctionJb") \
1370 or selfA._toStore("CostFunctionJo") \
1371 or selfA._toStore("APosterioriCovariance") \
1372 or selfA._toStore("InnovationAtCurrentAnalysis") \
1373 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1374 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1375 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1376 _Innovation = Ynpu - _HXa
1378 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1379 # ---> avec analysis
1380 selfA.StoredVariables["Analysis"].store( Xa )
1381 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1382 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1383 if selfA._toStore("InnovationAtCurrentAnalysis"):
1384 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1385 # ---> avec current state
1386 if selfA._parameters["StoreInternalVariables"] \
1387 or selfA._toStore("CurrentState"):
1388 selfA.StoredVariables["CurrentState"].store( Xn )
1389 if selfA._toStore("ForecastState"):
1390 selfA.StoredVariables["ForecastState"].store( EMX )
1391 if selfA._toStore("ForecastCovariance"):
1392 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1393 if selfA._toStore("BMA"):
1394 selfA.StoredVariables["BMA"].store( EMX - Xa )
1395 if selfA._toStore("InnovationAtCurrentState"):
1396 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1397 if selfA._toStore("SimulatedObservationAtCurrentState") \
1398 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1399 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1401 if selfA._parameters["StoreInternalVariables"] \
1402 or selfA._toStore("CostFunctionJ") \
1403 or selfA._toStore("CostFunctionJb") \
1404 or selfA._toStore("CostFunctionJo") \
1405 or selfA._toStore("CurrentOptimum") \
1406 or selfA._toStore("APosterioriCovariance"):
1407 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1408 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1410 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1411 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1412 selfA.StoredVariables["CostFunctionJ" ].store( J )
1414 if selfA._toStore("IndexOfOptimum") \
1415 or selfA._toStore("CurrentOptimum") \
1416 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1417 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1418 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1419 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1420 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1421 if selfA._toStore("IndexOfOptimum"):
1422 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1423 if selfA._toStore("CurrentOptimum"):
1424 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1425 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1426 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1427 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1428 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1429 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1430 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1431 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1432 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1433 if selfA._toStore("APosterioriCovariance"):
1434 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1435 if selfA._parameters["EstimationOf"] == "Parameters" \
1436 and J < previousJMinimum:
1437 previousJMinimum = J
1439 if selfA._toStore("APosterioriCovariance"):
1440 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1441 # ---> Pour les smoothers
1442 if selfA._toStore("CurrentEnsembleState"):
1443 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1445 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1446 # ----------------------------------------------------------------------
1447 if selfA._parameters["EstimationOf"] == "Parameters":
1448 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1449 selfA.StoredVariables["Analysis"].store( XaMin )
1450 if selfA._toStore("APosterioriCovariance"):
1451 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1452 if selfA._toStore("BMA"):
1453 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1457 # ==============================================================================
1458 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1460 Extended Kalman Filter
1462 if selfA._parameters["EstimationOf"] == "Parameters":
1463 selfA._parameters["StoreInternalVariables"] = True
1466 H = HO["Direct"].appliedControledFormTo
1468 if selfA._parameters["EstimationOf"] == "State":
1469 M = EM["Direct"].appliedControledFormTo
1471 if CM is not None and "Tangent" in CM and U is not None:
1472 Cm = CM["Tangent"].asMatrix(Xb)
1476 # Durée d'observation et tailles
1477 if hasattr(Y,"stepnumber"):
1478 duration = Y.stepnumber()
1479 __p = numpy.cumprod(Y.shape())[-1]
1482 __p = numpy.array(Y).size
1484 # Précalcul des inversions de B et R
1485 if selfA._parameters["StoreInternalVariables"] \
1486 or selfA._toStore("CostFunctionJ") \
1487 or selfA._toStore("CostFunctionJb") \
1488 or selfA._toStore("CostFunctionJo") \
1489 or selfA._toStore("CurrentOptimum") \
1490 or selfA._toStore("APosterioriCovariance"):
1496 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1499 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1500 selfA.StoredVariables["Analysis"].store( Xb )
1501 if selfA._toStore("APosterioriCovariance"):
1502 if hasattr(B,"asfullmatrix"):
1503 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1505 selfA.StoredVariables["APosterioriCovariance"].store( B )
1506 selfA._setInternalState("seed", numpy.random.get_state())
1507 elif selfA._parameters["nextStep"]:
1508 Xn = selfA._getInternalState("Xn")
1509 Pn = selfA._getInternalState("Pn")
1511 if selfA._parameters["EstimationOf"] == "Parameters":
1513 previousJMinimum = numpy.finfo(float).max
1515 for step in range(duration-1):
1516 if hasattr(Y,"store"):
1517 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1519 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1521 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1522 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1523 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1524 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1526 if selfA._parameters["EstimationOf"] == "State":
1527 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1528 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1529 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1530 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1533 if hasattr(U,"store") and len(U)>1:
1534 Un = numpy.ravel( U[step] ).reshape((-1,1))
1535 elif hasattr(U,"store") and len(U)==1:
1536 Un = numpy.ravel( U[0] ).reshape((-1,1))
1538 Un = numpy.ravel( U ).reshape((-1,1))
1542 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1543 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1544 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1545 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1546 Xn_predicted = Xn_predicted + Cm * Un
1547 Pn_predicted = Q + Mt * (Pn * Ma)
1548 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1549 # --- > Par principe, M = Id, Q = 0
1553 if selfA._parameters["EstimationOf"] == "State":
1554 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1555 _Innovation = Ynpu - HX_predicted
1556 elif selfA._parameters["EstimationOf"] == "Parameters":
1557 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1558 _Innovation = Ynpu - HX_predicted
1559 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1560 _Innovation = _Innovation - Cm * Un
1562 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1563 Xn = Xn_predicted + Kn * _Innovation
1564 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1567 #--------------------------
1568 selfA._setInternalState("Xn", Xn)
1569 selfA._setInternalState("Pn", Pn)
1570 #--------------------------
1572 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1573 # ---> avec analysis
1574 selfA.StoredVariables["Analysis"].store( Xa )
1575 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1576 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1577 if selfA._toStore("InnovationAtCurrentAnalysis"):
1578 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1579 # ---> avec current state
1580 if selfA._parameters["StoreInternalVariables"] \
1581 or selfA._toStore("CurrentState"):
1582 selfA.StoredVariables["CurrentState"].store( Xn )
1583 if selfA._toStore("ForecastState"):
1584 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1585 if selfA._toStore("ForecastCovariance"):
1586 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1587 if selfA._toStore("BMA"):
1588 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1589 if selfA._toStore("InnovationAtCurrentState"):
1590 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1591 if selfA._toStore("SimulatedObservationAtCurrentState") \
1592 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1593 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1595 if selfA._parameters["StoreInternalVariables"] \
1596 or selfA._toStore("CostFunctionJ") \
1597 or selfA._toStore("CostFunctionJb") \
1598 or selfA._toStore("CostFunctionJo") \
1599 or selfA._toStore("CurrentOptimum") \
1600 or selfA._toStore("APosterioriCovariance"):
1601 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1602 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1604 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1605 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1606 selfA.StoredVariables["CostFunctionJ" ].store( J )
1608 if selfA._toStore("IndexOfOptimum") \
1609 or selfA._toStore("CurrentOptimum") \
1610 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1611 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1612 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1613 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1615 if selfA._toStore("IndexOfOptimum"):
1616 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1617 if selfA._toStore("CurrentOptimum"):
1618 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1619 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1620 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1621 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1622 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1623 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1624 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1625 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1626 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1627 if selfA._toStore("APosterioriCovariance"):
1628 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1629 if selfA._parameters["EstimationOf"] == "Parameters" \
1630 and J < previousJMinimum:
1631 previousJMinimum = J
1633 if selfA._toStore("APosterioriCovariance"):
1634 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1636 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1637 # ----------------------------------------------------------------------
1638 if selfA._parameters["EstimationOf"] == "Parameters":
1639 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1640 selfA.StoredVariables["Analysis"].store( XaMin )
1641 if selfA._toStore("APosterioriCovariance"):
1642 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1643 if selfA._toStore("BMA"):
1644 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1648 # ==============================================================================
1649 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1650 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1654 if selfA._parameters["EstimationOf"] == "Parameters":
1655 selfA._parameters["StoreInternalVariables"] = True
1658 H = HO["Direct"].appliedControledFormTo
1660 if selfA._parameters["EstimationOf"] == "State":
1661 M = EM["Direct"].appliedControledFormTo
1663 if CM is not None and "Tangent" in CM and U is not None:
1664 Cm = CM["Tangent"].asMatrix(Xb)
1668 # Durée d'observation et tailles
1669 if hasattr(Y,"stepnumber"):
1670 duration = Y.stepnumber()
1671 __p = numpy.cumprod(Y.shape())[-1]
1674 __p = numpy.array(Y).size
1676 # Précalcul des inversions de B et R
1677 if selfA._parameters["StoreInternalVariables"] \
1678 or selfA._toStore("CostFunctionJ") \
1679 or selfA._toStore("CostFunctionJb") \
1680 or selfA._toStore("CostFunctionJo") \
1681 or selfA._toStore("CurrentOptimum") \
1682 or selfA._toStore("APosterioriCovariance"):
1687 __m = selfA._parameters["NumberOfMembers"]
1689 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1690 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1692 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1693 selfA.StoredVariables["Analysis"].store( Xb )
1694 if selfA._toStore("APosterioriCovariance"):
1695 if hasattr(B,"asfullmatrix"):
1696 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1698 selfA.StoredVariables["APosterioriCovariance"].store( B )
1699 selfA._setInternalState("seed", numpy.random.get_state())
1700 elif selfA._parameters["nextStep"]:
1701 Xn = selfA._getInternalState("Xn")
1703 previousJMinimum = numpy.finfo(float).max
1705 for step in range(duration-1):
1706 numpy.random.set_state(selfA._getInternalState("seed"))
1707 if hasattr(Y,"store"):
1708 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1710 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1713 if hasattr(U,"store") and len(U)>1:
1714 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1715 elif hasattr(U,"store") and len(U)==1:
1716 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1718 Un = numpy.asmatrix(numpy.ravel( U )).T
1722 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1723 Xn = CovarianceInflation( Xn,
1724 selfA._parameters["InflationType"],
1725 selfA._parameters["InflationFactor"],
1728 #--------------------------
1729 if VariantM == "IEnKF12":
1730 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1731 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1735 Ta = numpy.identity(__m)
1736 vw = numpy.zeros(__m)
1737 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1738 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1741 E1 = vx1 + _epsilon * EaX
1743 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1745 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1746 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1748 returnSerieAsArrayMatrix = True )
1749 elif selfA._parameters["EstimationOf"] == "Parameters":
1750 # --- > Par principe, M = Id
1752 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1753 vy1 = H((vx2, Un)).reshape((__p,1))
1755 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1757 returnSerieAsArrayMatrix = True )
1758 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1761 EaY = (HE2 - vy2) / _epsilon
1763 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1765 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1766 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1767 Deltaw = - numpy.linalg.solve(mH,GradJ)
1772 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1776 A2 = EnsembleOfAnomalies( E2 )
1779 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1780 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1783 #--------------------------
1785 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1787 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1788 Xn = CovarianceInflation( Xn,
1789 selfA._parameters["InflationType"],
1790 selfA._parameters["InflationFactor"],
1793 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1794 #--------------------------
1795 selfA._setInternalState("Xn", Xn)
1796 selfA._setInternalState("seed", numpy.random.get_state())
1797 #--------------------------
1799 if selfA._parameters["StoreInternalVariables"] \
1800 or selfA._toStore("CostFunctionJ") \
1801 or selfA._toStore("CostFunctionJb") \
1802 or selfA._toStore("CostFunctionJo") \
1803 or selfA._toStore("APosterioriCovariance") \
1804 or selfA._toStore("InnovationAtCurrentAnalysis") \
1805 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1806 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1807 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1808 _Innovation = Ynpu - _HXa
1810 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1811 # ---> avec analysis
1812 selfA.StoredVariables["Analysis"].store( Xa )
1813 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1814 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1815 if selfA._toStore("InnovationAtCurrentAnalysis"):
1816 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1817 # ---> avec current state
1818 if selfA._parameters["StoreInternalVariables"] \
1819 or selfA._toStore("CurrentState"):
1820 selfA.StoredVariables["CurrentState"].store( Xn )
1821 if selfA._toStore("ForecastState"):
1822 selfA.StoredVariables["ForecastState"].store( E2 )
1823 if selfA._toStore("ForecastCovariance"):
1824 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1825 if selfA._toStore("BMA"):
1826 selfA.StoredVariables["BMA"].store( E2 - Xa )
1827 if selfA._toStore("InnovationAtCurrentState"):
1828 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1829 if selfA._toStore("SimulatedObservationAtCurrentState") \
1830 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1831 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1833 if selfA._parameters["StoreInternalVariables"] \
1834 or selfA._toStore("CostFunctionJ") \
1835 or selfA._toStore("CostFunctionJb") \
1836 or selfA._toStore("CostFunctionJo") \
1837 or selfA._toStore("CurrentOptimum") \
1838 or selfA._toStore("APosterioriCovariance"):
1839 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1840 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1842 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1843 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1844 selfA.StoredVariables["CostFunctionJ" ].store( J )
1846 if selfA._toStore("IndexOfOptimum") \
1847 or selfA._toStore("CurrentOptimum") \
1848 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1849 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1850 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1851 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1852 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1853 if selfA._toStore("IndexOfOptimum"):
1854 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1855 if selfA._toStore("CurrentOptimum"):
1856 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1857 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1858 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1859 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1860 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1861 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1862 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1863 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1864 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1865 if selfA._toStore("APosterioriCovariance"):
1866 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1867 if selfA._parameters["EstimationOf"] == "Parameters" \
1868 and J < previousJMinimum:
1869 previousJMinimum = J
1871 if selfA._toStore("APosterioriCovariance"):
1872 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1873 # ---> Pour les smoothers
1874 if selfA._toStore("CurrentEnsembleState"):
1875 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1877 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1878 # ----------------------------------------------------------------------
1879 if selfA._parameters["EstimationOf"] == "Parameters":
1880 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1881 selfA.StoredVariables["Analysis"].store( XaMin )
1882 if selfA._toStore("APosterioriCovariance"):
1883 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1884 if selfA._toStore("BMA"):
1885 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1889 # ==============================================================================
1890 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1898 # Opérateur non-linéaire pour la boucle externe
1899 Hm = HO["Direct"].appliedTo
1901 # Précalcul des inversions de B et R
1905 # Point de démarrage de l'optimisation
1906 Xini = selfA._parameters["InitializationPoint"]
1908 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1909 Innovation = Y - HXb
1916 Xr = Xini.reshape((-1,1))
1917 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1921 Ht = HO["Tangent"].asMatrix(Xr)
1922 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1924 # Définition de la fonction-coût
1925 # ------------------------------
1926 def CostFunction(dx):
1927 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1928 if selfA._parameters["StoreInternalVariables"] or \
1929 selfA._toStore("CurrentState") or \
1930 selfA._toStore("CurrentOptimum"):
1931 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1933 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1934 _dInnovation = Innovation - _HdX
1935 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1936 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1937 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1938 if selfA._toStore("InnovationAtCurrentState"):
1939 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1941 Jb = float( 0.5 * _dX.T * BI * _dX )
1942 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1945 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1946 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1947 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1948 selfA.StoredVariables["CostFunctionJ" ].store( J )
1949 if selfA._toStore("IndexOfOptimum") or \
1950 selfA._toStore("CurrentOptimum") or \
1951 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1952 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1953 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1954 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1955 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1956 if selfA._toStore("IndexOfOptimum"):
1957 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1958 if selfA._toStore("CurrentOptimum"):
1959 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1960 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1961 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1962 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1963 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1964 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1965 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1966 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1967 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1970 def GradientOfCostFunction(dx):
1971 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1973 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1974 _dInnovation = Innovation - _HdX
1976 GradJo = - Ht.T @ (RI * _dInnovation)
1977 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1980 # Minimisation de la fonctionnelle
1981 # --------------------------------
1982 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1984 if selfA._parameters["Minimizer"] == "LBFGSB":
1985 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1986 if "0.19" <= scipy.version.version <= "1.1.0":
1987 import lbfgsbhlt as optimiseur
1989 import scipy.optimize as optimiseur
1990 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1991 func = CostFunction,
1992 x0 = numpy.zeros(Xini.size),
1993 fprime = GradientOfCostFunction,
1995 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1996 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1997 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1998 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1999 iprint = selfA._parameters["optiprint"],
2001 nfeval = Informations['funcalls']
2002 rc = Informations['warnflag']
2003 elif selfA._parameters["Minimizer"] == "TNC":
2004 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2005 func = CostFunction,
2006 x0 = numpy.zeros(Xini.size),
2007 fprime = GradientOfCostFunction,
2009 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2010 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2011 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2012 ftol = selfA._parameters["CostDecrementTolerance"],
2013 messages = selfA._parameters["optmessages"],
2015 elif selfA._parameters["Minimizer"] == "CG":
2016 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2018 x0 = numpy.zeros(Xini.size),
2019 fprime = GradientOfCostFunction,
2021 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2022 gtol = selfA._parameters["GradientNormTolerance"],
2023 disp = selfA._parameters["optdisp"],
2026 elif selfA._parameters["Minimizer"] == "NCG":
2027 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2029 x0 = numpy.zeros(Xini.size),
2030 fprime = GradientOfCostFunction,
2032 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2033 avextol = selfA._parameters["CostDecrementTolerance"],
2034 disp = selfA._parameters["optdisp"],
2037 elif selfA._parameters["Minimizer"] == "BFGS":
2038 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2040 x0 = numpy.zeros(Xini.size),
2041 fprime = GradientOfCostFunction,
2043 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2044 gtol = selfA._parameters["GradientNormTolerance"],
2045 disp = selfA._parameters["optdisp"],
2049 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2051 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2052 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2054 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2055 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2056 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2058 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
2061 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2062 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2064 # Obtention de l'analyse
2065 # ----------------------
2068 selfA.StoredVariables["Analysis"].store( Xa )
2070 if selfA._toStore("OMA") or \
2071 selfA._toStore("SigmaObs2") or \
2072 selfA._toStore("SimulationQuantiles") or \
2073 selfA._toStore("SimulatedObservationAtOptimum"):
2074 if selfA._toStore("SimulatedObservationAtCurrentState"):
2075 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2076 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2077 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2081 # Calcul de la covariance d'analyse
2082 # ---------------------------------
2083 if selfA._toStore("APosterioriCovariance") or \
2084 selfA._toStore("SimulationQuantiles") or \
2085 selfA._toStore("JacobianMatrixAtOptimum") or \
2086 selfA._toStore("KalmanGainAtOptimum"):
2087 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2088 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2089 if selfA._toStore("APosterioriCovariance") or \
2090 selfA._toStore("SimulationQuantiles") or \
2091 selfA._toStore("KalmanGainAtOptimum"):
2092 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2093 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2094 if selfA._toStore("APosterioriCovariance") or \
2095 selfA._toStore("SimulationQuantiles"):
2099 _ee = numpy.matrix(numpy.zeros(nb)).T
2101 _HtEE = numpy.dot(HtM,_ee)
2102 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2103 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2104 HessienneI = numpy.matrix( HessienneI )
2106 if min(A.shape) != max(A.shape):
2107 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2108 if (numpy.diag(A) < 0).any():
2109 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2110 if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2112 L = numpy.linalg.cholesky( A )
2114 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2115 if selfA._toStore("APosterioriCovariance"):
2116 selfA.StoredVariables["APosterioriCovariance"].store( A )
2117 if selfA._toStore("JacobianMatrixAtOptimum"):
2118 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2119 if selfA._toStore("KalmanGainAtOptimum"):
2120 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2121 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2122 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2124 # Calculs et/ou stockages supplémentaires
2125 # ---------------------------------------
2126 if selfA._toStore("Innovation") or \
2127 selfA._toStore("SigmaObs2") or \
2128 selfA._toStore("MahalanobisConsistency") or \
2129 selfA._toStore("OMB"):
2131 if selfA._toStore("Innovation"):
2132 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2133 if selfA._toStore("BMA"):
2134 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2135 if selfA._toStore("OMA"):
2136 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2137 if selfA._toStore("OMB"):
2138 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2139 if selfA._toStore("SigmaObs2"):
2140 TraceR = R.trace(Y.size)
2141 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2142 if selfA._toStore("MahalanobisConsistency"):
2143 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2144 if selfA._toStore("SimulationQuantiles"):
2145 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2146 if selfA._toStore("SimulatedObservationAtBackground"):
2147 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2148 if selfA._toStore("SimulatedObservationAtOptimum"):
2149 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2153 # ==============================================================================
2154 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2155 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2157 Maximum Likelihood Ensemble Filter
2159 if selfA._parameters["EstimationOf"] == "Parameters":
2160 selfA._parameters["StoreInternalVariables"] = True
2163 H = HO["Direct"].appliedControledFormTo
2165 if selfA._parameters["EstimationOf"] == "State":
2166 M = EM["Direct"].appliedControledFormTo
2168 if CM is not None and "Tangent" in CM and U is not None:
2169 Cm = CM["Tangent"].asMatrix(Xb)
2173 # Durée d'observation et tailles
2174 if hasattr(Y,"stepnumber"):
2175 duration = Y.stepnumber()
2176 __p = numpy.cumprod(Y.shape())[-1]
2179 __p = numpy.array(Y).size
2181 # Précalcul des inversions de B et R
2182 if selfA._parameters["StoreInternalVariables"] \
2183 or selfA._toStore("CostFunctionJ") \
2184 or selfA._toStore("CostFunctionJb") \
2185 or selfA._toStore("CostFunctionJo") \
2186 or selfA._toStore("CurrentOptimum") \
2187 or selfA._toStore("APosterioriCovariance"):
2192 __m = selfA._parameters["NumberOfMembers"]
2194 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2195 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2196 selfA.StoredVariables["Analysis"].store( Xb )
2197 if selfA._toStore("APosterioriCovariance"):
2198 if hasattr(B,"asfullmatrix"):
2199 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2201 selfA.StoredVariables["APosterioriCovariance"].store( B )
2202 selfA._setInternalState("seed", numpy.random.get_state())
2203 elif selfA._parameters["nextStep"]:
2204 Xn = selfA._getInternalState("Xn")
2206 previousJMinimum = numpy.finfo(float).max
2208 for step in range(duration-1):
2209 numpy.random.set_state(selfA._getInternalState("seed"))
2210 if hasattr(Y,"store"):
2211 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2213 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2216 if hasattr(U,"store") and len(U)>1:
2217 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2218 elif hasattr(U,"store") and len(U)==1:
2219 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2221 Un = numpy.asmatrix(numpy.ravel( U )).T
2225 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2226 Xn = CovarianceInflation( Xn,
2227 selfA._parameters["InflationType"],
2228 selfA._parameters["InflationFactor"],
2231 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2232 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2234 returnSerieAsArrayMatrix = True )
2235 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2236 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2237 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2238 Xn_predicted = Xn_predicted + Cm * Un
2239 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2240 # --- > Par principe, M = Id, Q = 0
2241 Xn_predicted = EMX = Xn
2243 #--------------------------
2244 if VariantM == "MLEF13":
2245 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2246 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2247 Ua = numpy.identity(__m)
2251 Ta = numpy.identity(__m)
2252 vw = numpy.zeros(__m)
2253 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2254 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2257 E1 = vx1 + _epsilon * EaX
2259 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2261 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2263 returnSerieAsArrayMatrix = True )
2264 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2267 EaY = (HE2 - vy2) / _epsilon
2269 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2271 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2272 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2273 Deltaw = - numpy.linalg.solve(mH,GradJ)
2278 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2283 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2285 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2286 #--------------------------
2288 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2290 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2291 Xn = CovarianceInflation( Xn,
2292 selfA._parameters["InflationType"],
2293 selfA._parameters["InflationFactor"],
2296 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2297 #--------------------------
2298 selfA._setInternalState("Xn", Xn)
2299 selfA._setInternalState("seed", numpy.random.get_state())
2300 #--------------------------
2302 if selfA._parameters["StoreInternalVariables"] \
2303 or selfA._toStore("CostFunctionJ") \
2304 or selfA._toStore("CostFunctionJb") \
2305 or selfA._toStore("CostFunctionJo") \
2306 or selfA._toStore("APosterioriCovariance") \
2307 or selfA._toStore("InnovationAtCurrentAnalysis") \
2308 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2309 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2310 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2311 _Innovation = Ynpu - _HXa
2313 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2314 # ---> avec analysis
2315 selfA.StoredVariables["Analysis"].store( Xa )
2316 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2317 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2318 if selfA._toStore("InnovationAtCurrentAnalysis"):
2319 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2320 # ---> avec current state
2321 if selfA._parameters["StoreInternalVariables"] \
2322 or selfA._toStore("CurrentState"):
2323 selfA.StoredVariables["CurrentState"].store( Xn )
2324 if selfA._toStore("ForecastState"):
2325 selfA.StoredVariables["ForecastState"].store( EMX )
2326 if selfA._toStore("ForecastCovariance"):
2327 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2328 if selfA._toStore("BMA"):
2329 selfA.StoredVariables["BMA"].store( EMX - Xa )
2330 if selfA._toStore("InnovationAtCurrentState"):
2331 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2332 if selfA._toStore("SimulatedObservationAtCurrentState") \
2333 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2334 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2336 if selfA._parameters["StoreInternalVariables"] \
2337 or selfA._toStore("CostFunctionJ") \
2338 or selfA._toStore("CostFunctionJb") \
2339 or selfA._toStore("CostFunctionJo") \
2340 or selfA._toStore("CurrentOptimum") \
2341 or selfA._toStore("APosterioriCovariance"):
2342 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2343 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2345 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2346 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2347 selfA.StoredVariables["CostFunctionJ" ].store( J )
2349 if selfA._toStore("IndexOfOptimum") \
2350 or selfA._toStore("CurrentOptimum") \
2351 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2352 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2353 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2354 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2355 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2356 if selfA._toStore("IndexOfOptimum"):
2357 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2358 if selfA._toStore("CurrentOptimum"):
2359 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2360 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2361 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2362 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2363 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2364 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2365 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2366 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2367 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2368 if selfA._toStore("APosterioriCovariance"):
2369 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2370 if selfA._parameters["EstimationOf"] == "Parameters" \
2371 and J < previousJMinimum:
2372 previousJMinimum = J
2374 if selfA._toStore("APosterioriCovariance"):
2375 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2376 # ---> Pour les smoothers
2377 if selfA._toStore("CurrentEnsembleState"):
2378 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2380 # Stockage final supplémentaire de l'optimum en estimation de paramètres
2381 # ----------------------------------------------------------------------
2382 if selfA._parameters["EstimationOf"] == "Parameters":
2383 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2384 selfA.StoredVariables["Analysis"].store( XaMin )
2385 if selfA._toStore("APosterioriCovariance"):
2386 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2387 if selfA._toStore("BMA"):
2388 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2392 # ==============================================================================
2404 Implémentation informatique de l'algorithme MMQR, basée sur la publication :
2405 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2406 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2409 # Recuperation des donnees et informations initiales
2410 # --------------------------------------------------
2411 variables = numpy.ravel( x0 )
2412 mesures = numpy.ravel( y )
2413 increment = sys.float_info[0]
2416 quantile = float(quantile)
2418 # Calcul des parametres du MM
2419 # ---------------------------
2420 tn = float(toler) / n
2421 e0 = -tn / math.log(tn)
2422 epsilon = (e0-tn)/(1+math.log(e0))
2424 # Calculs d'initialisation
2425 # ------------------------
2426 residus = mesures - numpy.ravel( func( variables ) )
2427 poids = 1./(epsilon+numpy.abs(residus))
2428 veps = 1. - 2. * quantile - residus * poids
2429 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2432 # Recherche iterative
2433 # -------------------
2434 while (increment > toler) and (iteration < maxfun) :
2437 Derivees = numpy.array(fprime(variables))
2438 Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
2439 DeriveesT = Derivees.transpose()
2440 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2441 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2442 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2444 variables = variables + step
2445 if bounds is not None:
2446 # Attention : boucle infinie à éviter si un intervalle est trop petit
2447 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2449 variables = variables - step
2450 residus = mesures - numpy.ravel( func(variables) )
2451 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2453 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2455 variables = variables - step
2456 residus = mesures - numpy.ravel( func(variables) )
2457 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2459 increment = lastsurrogate-surrogate
2460 poids = 1./(epsilon+numpy.abs(residus))
2461 veps = 1. - 2. * quantile - residus * poids
2462 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2466 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2468 return variables, Ecart, [n,p,iteration,increment,0]
2470 # ==============================================================================
2471 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2473 3DVAR multi-pas et multi-méthodes
2477 if selfA._parameters["EstimationOf"] == "State":
2478 M = EM["Direct"].appliedTo
2480 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2481 Xn = numpy.ravel(Xb).reshape((-1,1))
2482 selfA.StoredVariables["Analysis"].store( Xn )
2483 if selfA._toStore("APosterioriCovariance"):
2484 if hasattr(B,"asfullmatrix"):
2485 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2487 selfA.StoredVariables["APosterioriCovariance"].store( B )
2488 if selfA._toStore("ForecastState"):
2489 selfA.StoredVariables["ForecastState"].store( Xn )
2490 elif selfA._parameters["nextStep"]:
2491 Xn = selfA._getInternalState("Xn")
2493 Xn = numpy.ravel(Xb).reshape((-1,1))
2495 if hasattr(Y,"stepnumber"):
2496 duration = Y.stepnumber()
2501 for step in range(duration-1):
2502 if hasattr(Y,"store"):
2503 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2505 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2507 if selfA._parameters["EstimationOf"] == "State": # Forecast
2508 Xn_predicted = M( Xn )
2509 if selfA._toStore("ForecastState"):
2510 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2511 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2512 # --- > Par principe, M = Id, Q = 0
2514 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2516 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2518 Xn = selfA.StoredVariables["Analysis"][-1]
2519 #--------------------------
2520 selfA._setInternalState("Xn", Xn)
2524 # ==============================================================================
2525 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2534 Hm = HO["Direct"].appliedTo
2536 # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2537 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2538 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2541 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2542 if Y.size != HXb.size:
2543 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2544 if max(Y.shape) != max(HXb.shape):
2545 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2547 if selfA._toStore("JacobianMatrixAtBackground"):
2548 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2549 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2550 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2552 Ht = HO["Tangent"].asMatrix(Xb)
2554 HBHTpR = R + Ht * BHT
2555 Innovation = Y - HXb
2557 # Point de démarrage de l'optimisation
2558 Xini = numpy.zeros(Xb.shape)
2560 # Définition de la fonction-coût
2561 # ------------------------------
2562 def CostFunction(w):
2563 _W = numpy.asmatrix(numpy.ravel( w )).T
2564 if selfA._parameters["StoreInternalVariables"] or \
2565 selfA._toStore("CurrentState") or \
2566 selfA._toStore("CurrentOptimum"):
2567 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2568 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2569 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2571 if selfA._toStore("InnovationAtCurrentState"):
2572 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2574 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2575 Jo = float( - _W.T * Innovation )
2578 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2579 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2580 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2581 selfA.StoredVariables["CostFunctionJ" ].store( J )
2582 if selfA._toStore("IndexOfOptimum") or \
2583 selfA._toStore("CurrentOptimum") or \
2584 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2585 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2586 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2587 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2588 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2589 if selfA._toStore("IndexOfOptimum"):
2590 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2591 if selfA._toStore("CurrentOptimum"):
2592 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2593 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2594 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2595 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2596 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2597 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2598 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2599 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2600 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2603 def GradientOfCostFunction(w):
2604 _W = numpy.asmatrix(numpy.ravel( w )).T
2605 GradJb = HBHTpR * _W
2606 GradJo = - Innovation
2607 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2610 # Minimisation de la fonctionnelle
2611 # --------------------------------
2612 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2614 if selfA._parameters["Minimizer"] == "LBFGSB":
2615 if "0.19" <= scipy.version.version <= "1.1.0":
2616 import lbfgsbhlt as optimiseur
2618 import scipy.optimize as optimiseur
2619 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2620 func = CostFunction,
2622 fprime = GradientOfCostFunction,
2624 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2625 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2626 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2627 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2628 iprint = selfA._parameters["optiprint"],
2630 nfeval = Informations['funcalls']
2631 rc = Informations['warnflag']
2632 elif selfA._parameters["Minimizer"] == "TNC":
2633 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2634 func = CostFunction,
2636 fprime = GradientOfCostFunction,
2638 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2639 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2640 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2641 ftol = selfA._parameters["CostDecrementTolerance"],
2642 messages = selfA._parameters["optmessages"],
2644 elif selfA._parameters["Minimizer"] == "CG":
2645 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2648 fprime = GradientOfCostFunction,
2650 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2651 gtol = selfA._parameters["GradientNormTolerance"],
2652 disp = selfA._parameters["optdisp"],
2655 elif selfA._parameters["Minimizer"] == "NCG":
2656 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2659 fprime = GradientOfCostFunction,
2661 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2662 avextol = selfA._parameters["CostDecrementTolerance"],
2663 disp = selfA._parameters["optdisp"],
2666 elif selfA._parameters["Minimizer"] == "BFGS":
2667 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2670 fprime = GradientOfCostFunction,
2672 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2673 gtol = selfA._parameters["GradientNormTolerance"],
2674 disp = selfA._parameters["optdisp"],
2678 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2680 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2681 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2683 # Correction pour pallier a un bug de TNC sur le retour du Minimum
2684 # ----------------------------------------------------------------
2685 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2686 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2687 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2689 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2691 # Obtention de l'analyse
2692 # ----------------------
2695 selfA.StoredVariables["Analysis"].store( Xa )
2697 if selfA._toStore("OMA") or \
2698 selfA._toStore("SigmaObs2") or \
2699 selfA._toStore("SimulationQuantiles") or \
2700 selfA._toStore("SimulatedObservationAtOptimum"):
2701 if selfA._toStore("SimulatedObservationAtCurrentState"):
2702 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2703 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2704 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2708 # Calcul de la covariance d'analyse
2709 # ---------------------------------
2710 if selfA._toStore("APosterioriCovariance") or \
2711 selfA._toStore("SimulationQuantiles") or \
2712 selfA._toStore("JacobianMatrixAtOptimum") or \
2713 selfA._toStore("KalmanGainAtOptimum"):
2714 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2715 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2716 if selfA._toStore("APosterioriCovariance") or \
2717 selfA._toStore("SimulationQuantiles") or \
2718 selfA._toStore("KalmanGainAtOptimum"):
2719 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2720 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2721 if selfA._toStore("APosterioriCovariance") or \
2722 selfA._toStore("SimulationQuantiles"):
2728 _ee = numpy.matrix(numpy.zeros(nb)).T
2730 _HtEE = numpy.dot(HtM,_ee)
2731 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2732 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2733 HessienneI = numpy.matrix( HessienneI )
2735 if min(A.shape) != max(A.shape):
2736 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2737 if (numpy.diag(A) < 0).any():
2738 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2739 if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
2741 L = numpy.linalg.cholesky( A )
2743 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2744 if selfA._toStore("APosterioriCovariance"):
2745 selfA.StoredVariables["APosterioriCovariance"].store( A )
2746 if selfA._toStore("JacobianMatrixAtOptimum"):
2747 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2748 if selfA._toStore("KalmanGainAtOptimum"):
2749 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2750 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2751 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2753 # Calculs et/ou stockages supplémentaires
2754 # ---------------------------------------
2755 if selfA._toStore("Innovation") or \
2756 selfA._toStore("SigmaObs2") or \
2757 selfA._toStore("MahalanobisConsistency") or \
2758 selfA._toStore("OMB"):
2760 if selfA._toStore("Innovation"):
2761 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2762 if selfA._toStore("BMA"):
2763 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2764 if selfA._toStore("OMA"):
2765 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2766 if selfA._toStore("OMB"):
2767 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2768 if selfA._toStore("SigmaObs2"):
2769 TraceR = R.trace(Y.size)
2770 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2771 if selfA._toStore("MahalanobisConsistency"):
2772 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2773 if selfA._toStore("SimulationQuantiles"):
2774 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2775 if selfA._toStore("SimulatedObservationAtBackground"):
2776 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2777 if selfA._toStore("SimulatedObservationAtOptimum"):
2778 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2782 # ==============================================================================
2783 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2787 if selfA._parameters["EstimationOf"] == "Parameters":
2788 selfA._parameters["StoreInternalVariables"] = True
2791 H = HO["Direct"].appliedControledFormTo
2793 if selfA._parameters["EstimationOf"] == "State":
2794 M = EM["Direct"].appliedControledFormTo
2796 if CM is not None and "Tangent" in CM and U is not None:
2797 Cm = CM["Tangent"].asMatrix(Xb)
2801 # Durée d'observation et tailles
2802 if hasattr(Y,"stepnumber"):
2803 duration = Y.stepnumber()
2804 __p = numpy.cumprod(Y.shape())[-1]
2807 __p = numpy.array(Y).size
2809 # Précalcul des inversions de B et R
2810 if selfA._parameters["StoreInternalVariables"] \
2811 or selfA._toStore("CostFunctionJ") \
2812 or selfA._toStore("CostFunctionJb") \
2813 or selfA._toStore("CostFunctionJo") \
2814 or selfA._toStore("CurrentOptimum") \
2815 or selfA._toStore("APosterioriCovariance"):
2820 __m = selfA._parameters["NumberOfMembers"]
2822 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2825 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2826 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2828 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2829 selfA.StoredVariables["Analysis"].store( Xb )
2830 if selfA._toStore("APosterioriCovariance"):
2831 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2832 selfA._setInternalState("seed", numpy.random.get_state())
2833 elif selfA._parameters["nextStep"]:
2834 Xn = selfA._getInternalState("Xn")
2836 previousJMinimum = numpy.finfo(float).max
2838 for step in range(duration-1):
2839 numpy.random.set_state(selfA._getInternalState("seed"))
2840 if hasattr(Y,"store"):
2841 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2843 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2846 if hasattr(U,"store") and len(U)>1:
2847 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2848 elif hasattr(U,"store") and len(U)==1:
2849 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2851 Un = numpy.asmatrix(numpy.ravel( U )).T
2855 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2856 Xn = CovarianceInflation( Xn,
2857 selfA._parameters["InflationType"],
2858 selfA._parameters["InflationFactor"],
2861 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2862 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2864 returnSerieAsArrayMatrix = True )
2865 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2866 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2868 returnSerieAsArrayMatrix = True )
2869 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2870 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2871 Xn_predicted = Xn_predicted + Cm * Un
2872 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2873 # --- > Par principe, M = Id, Q = 0
2874 Xn_predicted = EMX = Xn
2875 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2877 returnSerieAsArrayMatrix = True )
2879 # Mean of forecast and observation of forecast
2880 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2881 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2883 #--------------------------
2884 if VariantM == "KalmanFilterFormula05":
2885 PfHT, HPfHT = 0., 0.
2886 for i in range(__m):
2887 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2888 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2889 PfHT += Exfi * Eyfi.T
2890 HPfHT += Eyfi * Eyfi.T
2891 PfHT = (1./(__m-1)) * PfHT
2892 HPfHT = (1./(__m-1)) * HPfHT
2893 Kn = PfHT * ( R + HPfHT ).I
2896 for i in range(__m):
2897 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2898 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2899 #--------------------------
2900 elif VariantM == "KalmanFilterFormula16":
2901 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2902 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2904 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2905 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2907 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2909 for i in range(__m):
2910 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2911 #--------------------------
2913 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2915 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2916 Xn = CovarianceInflation( Xn,
2917 selfA._parameters["InflationType"],
2918 selfA._parameters["InflationFactor"],
2921 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2922 #--------------------------
2923 selfA._setInternalState("Xn", Xn)
2924 selfA._setInternalState("seed", numpy.random.get_state())
2925 #--------------------------
2927 if selfA._parameters["StoreInternalVariables"] \
2928 or selfA._toStore("CostFunctionJ") \
2929 or selfA._toStore("CostFunctionJb") \
2930 or selfA._toStore("CostFunctionJo") \
2931 or selfA._toStore("APosterioriCovariance") \
2932 or selfA._toStore("InnovationAtCurrentAnalysis") \
2933 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2934 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2935 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2936 _Innovation = Ynpu - _HXa
2938 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2939 # ---> avec analysis
2940 selfA.StoredVariables["Analysis"].store( Xa )
2941 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2942 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2943 if selfA._toStore("InnovationAtCurrentAnalysis"):
2944 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2945 # ---> avec current state
2946 if selfA._parameters["StoreInternalVariables"] \
2947 or selfA._toStore("CurrentState"):
2948 selfA.StoredVariables["CurrentState"].store( Xn )
2949 if selfA._toStore("ForecastState"):
2950 selfA.StoredVariables["ForecastState"].store( EMX )
2951 if selfA._toStore("ForecastCovariance"):
2952 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2953 if selfA._toStore("BMA"):
2954 selfA.StoredVariables["BMA"].store( EMX - Xa )
2955 if selfA._toStore("InnovationAtCurrentState"):
2956 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2957 if selfA._toStore("SimulatedObservationAtCurrentState") \
2958 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2959 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2961 if selfA._parameters["StoreInternalVariables"] \
2962 or selfA._toStore("CostFunctionJ") \
2963 or selfA._toStore("CostFunctionJb") \
2964 or selfA._toStore("CostFunctionJo") \
2965 or selfA._toStore("CurrentOptimum") \
2966 or selfA._toStore("APosterioriCovariance"):
2967 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2968 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2970 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2971 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2972 selfA.StoredVariables["CostFunctionJ" ].store( J )
2974 if selfA._toStore("IndexOfOptimum") \
2975 or selfA._toStore("CurrentOptimum") \
2976 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2977 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2978 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2979 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2980 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2981 if selfA._toStore("IndexOfOptimum"):
2982 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2983 if selfA._toStore("CurrentOptimum"):
2984 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2985 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2986 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2987 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2988 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2989 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2990 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2991 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2992 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2993 if selfA._toStore("APosterioriCovariance"):
2994 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2995 if selfA._parameters["EstimationOf"] == "Parameters" \
2996 and J < previousJMinimum:
2997 previousJMinimum = J
2999 if selfA._toStore("APosterioriCovariance"):
3000 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3001 # ---> Pour les smoothers
3002 if selfA._toStore("CurrentEnsembleState"):
3003 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3005 # Stockage final supplémentaire de l'optimum en estimation de paramètres
3006 # ----------------------------------------------------------------------
3007 if selfA._parameters["EstimationOf"] == "Parameters":
3008 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3009 selfA.StoredVariables["Analysis"].store( XaMin )
3010 if selfA._toStore("APosterioriCovariance"):
3011 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3012 if selfA._toStore("BMA"):
3013 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3017 # ==============================================================================
3018 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3027 Hm = HO["Direct"].appliedTo
3028 Ha = HO["Adjoint"].appliedInXTo
3030 # Utilisation éventuelle d'un vecteur H(Xb) précalculé
3031 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3032 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3035 HXb = HXb.reshape((-1,1))
3036 if Y.size != HXb.size:
3037 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3038 if max(Y.shape) != max(HXb.shape):
3039 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3041 if selfA._toStore("JacobianMatrixAtBackground"):
3042 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3043 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3044 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3046 # Précalcul des inversions de B et R
3050 # Point de démarrage de l'optimisation
3051 Xini = selfA._parameters["InitializationPoint"]
3053 # Définition de la fonction-coût
3054 # ------------------------------
3055 def CostFunction(x):
3056 _X = numpy.ravel( x ).reshape((-1,1))
3057 if selfA._parameters["StoreInternalVariables"] or \
3058 selfA._toStore("CurrentState") or \
3059 selfA._toStore("CurrentOptimum"):
3060 selfA.StoredVariables["CurrentState"].store( _X )
3061 _HX = Hm( _X ).reshape((-1,1))
3062 _Innovation = Y - _HX
3063 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3064 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3065 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3066 if selfA._toStore("InnovationAtCurrentState"):
3067 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3069 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3070 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3073 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3074 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3075 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3076 selfA.StoredVariables["CostFunctionJ" ].store( J )
3077 if selfA._toStore("IndexOfOptimum") or \
3078 selfA._toStore("CurrentOptimum") or \
3079 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3080 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3081 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3082 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3083 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3084 if selfA._toStore("IndexOfOptimum"):
3085 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3086 if selfA._toStore("CurrentOptimum"):
3087 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3088 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3089 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3090 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3091 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3092 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3093 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3094 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3095 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3098 def GradientOfCostFunction(x):
3099 _X = numpy.ravel( x ).reshape((-1,1))
3101 _HX = numpy.ravel( _HX ).reshape((-1,1))
3102 GradJb = BI * (_X - Xb)
3103 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3104 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3107 # Minimisation de la fonctionnelle
3108 # --------------------------------
3109 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3111 if selfA._parameters["Minimizer"] == "LBFGSB":
3112 if "0.19" <= scipy.version.version <= "1.1.0":
3113 import lbfgsbhlt as optimiseur
3115 import scipy.optimize as optimiseur
3116 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3117 func = CostFunction,
3119 fprime = GradientOfCostFunction,
3121 bounds = selfA._parameters["Bounds"],
3122 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3123 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3124 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3125 iprint = selfA._parameters["optiprint"],
3127 nfeval = Informations['funcalls']
3128 rc = Informations['warnflag']
3129 elif selfA._parameters["Minimizer"] == "TNC":
3130 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3131 func = CostFunction,
3133 fprime = GradientOfCostFunction,
3135 bounds = selfA._parameters["Bounds"],
3136 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3137 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3138 ftol = selfA._parameters["CostDecrementTolerance"],
3139 messages = selfA._parameters["optmessages"],
3141 elif selfA._parameters["Minimizer"] == "CG":
3142 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3145 fprime = GradientOfCostFunction,
3147 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3148 gtol = selfA._parameters["GradientNormTolerance"],
3149 disp = selfA._parameters["optdisp"],
3152 elif selfA._parameters["Minimizer"] == "NCG":
3153 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3156 fprime = GradientOfCostFunction,
3158 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3159 avextol = selfA._parameters["CostDecrementTolerance"],
3160 disp = selfA._parameters["optdisp"],
3163 elif selfA._parameters["Minimizer"] == "BFGS":
3164 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3167 fprime = GradientOfCostFunction,
3169 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3170 gtol = selfA._parameters["GradientNormTolerance"],
3171 disp = selfA._parameters["optdisp"],
3175 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3177 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3178 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3180 # Correction pour pallier a un bug de TNC sur le retour du Minimum
3181 # ----------------------------------------------------------------
3182 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3183 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3186 #--------------------------
3188 selfA.StoredVariables["Analysis"].store( Xa )
3190 if selfA._toStore("OMA") or \
3191 selfA._toStore("SigmaObs2") or \
3192 selfA._toStore("SimulationQuantiles") or \
3193 selfA._toStore("SimulatedObservationAtOptimum"):
3194 if selfA._toStore("SimulatedObservationAtCurrentState"):
3195 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3196 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3197 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3201 if selfA._toStore("APosterioriCovariance") or \
3202 selfA._toStore("SimulationQuantiles") or \
3203 selfA._toStore("JacobianMatrixAtOptimum") or \
3204 selfA._toStore("KalmanGainAtOptimum"):
3205 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3206 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3207 if selfA._toStore("APosterioriCovariance") or \
3208 selfA._toStore("SimulationQuantiles") or \
3209 selfA._toStore("KalmanGainAtOptimum"):
3210 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3211 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3212 if selfA._toStore("APosterioriCovariance") or \
3213 selfA._toStore("SimulationQuantiles"):
3217 _ee = numpy.zeros(nb)
3219 _HtEE = numpy.dot(HtM,_ee)
3220 HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
3221 A = numpy.linalg.inv(numpy.array( HessienneI ))
3222 if min(A.shape) != max(A.shape):
3223 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3224 if (numpy.diag(A) < 0).any():
3225 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3226 if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
3228 L = numpy.linalg.cholesky( A )
3230 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3231 if selfA._toStore("APosterioriCovariance"):
3232 selfA.StoredVariables["APosterioriCovariance"].store( A )
3233 if selfA._toStore("JacobianMatrixAtOptimum"):
3234 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3235 if selfA._toStore("KalmanGainAtOptimum"):
3236 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3237 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3238 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3240 if selfA._toStore("Innovation") or \
3241 selfA._toStore("SigmaObs2") or \
3242 selfA._toStore("MahalanobisConsistency") or \
3243 selfA._toStore("OMB"):
3245 if selfA._toStore("Innovation"):
3246 selfA.StoredVariables["Innovation"].store( d )
3247 if selfA._toStore("BMA"):
3248 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3249 if selfA._toStore("OMA"):
3250 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3251 if selfA._toStore("OMB"):
3252 selfA.StoredVariables["OMB"].store( d )
3253 if selfA._toStore("SigmaObs2"):
3254 TraceR = R.trace(Y.size)
3255 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3256 if selfA._toStore("MahalanobisConsistency"):
3257 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3258 if selfA._toStore("SimulationQuantiles"):
3259 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3260 if selfA._toStore("SimulatedObservationAtBackground"):
3261 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3262 if selfA._toStore("SimulatedObservationAtOptimum"):
3263 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3267 # ==============================================================================
3268 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3277 Hm = HO["Direct"].appliedControledFormTo
3278 Mm = EM["Direct"].appliedControledFormTo
3280 if CM is not None and "Tangent" in CM and U is not None:
3281 Cm = CM["Tangent"].asMatrix(Xb)
3287 if hasattr(U,"store") and 1<=_step<len(U) :
3288 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3289 elif hasattr(U,"store") and len(U)==1:
3290 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3292 _Un = numpy.asmatrix(numpy.ravel( U )).T
3297 if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
3298 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3304 # Remarque : les observations sont exploitées à partir du pas de temps
3305 # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
3306 # Donc le pas 0 n'est pas utilisé puisque la première étape commence
3307 # avec l'observation du pas 1.
3309 # Nombre de pas identique au nombre de pas d'observations
3310 if hasattr(Y,"stepnumber"):
3311 duration = Y.stepnumber()
3315 # Précalcul des inversions de B et R
3319 # Point de démarrage de l'optimisation
3320 Xini = selfA._parameters["InitializationPoint"]
3322 # Définition de la fonction-coût
3323 # ------------------------------
3324 selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
3325 selfA.DirectInnovation = [None,] # Le pas 0 n'est pas observé
3326 def CostFunction(x):
3327 _X = numpy.asmatrix(numpy.ravel( x )).T
3328 if selfA._parameters["StoreInternalVariables"] or \
3329 selfA._toStore("CurrentState") or \
3330 selfA._toStore("CurrentOptimum"):
3331 selfA.StoredVariables["CurrentState"].store( _X )
3332 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3333 selfA.DirectCalculation = [None,]
3334 selfA.DirectInnovation = [None,]
3337 for step in range(0,duration-1):
3338 if hasattr(Y,"store"):
3339 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3341 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3345 if selfA._parameters["EstimationOf"] == "State":
3346 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3347 elif selfA._parameters["EstimationOf"] == "Parameters":
3350 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3351 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3353 # Etape de différence aux observations
3354 if selfA._parameters["EstimationOf"] == "State":
3355 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3356 elif selfA._parameters["EstimationOf"] == "Parameters":
3357 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3359 # Stockage de l'état
3360 selfA.DirectCalculation.append( _Xn )
3361 selfA.DirectInnovation.append( _YmHMX )
3363 # Ajout dans la fonctionnelle d'observation
3364 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3367 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3368 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3369 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3370 selfA.StoredVariables["CostFunctionJ" ].store( J )
3371 if selfA._toStore("IndexOfOptimum") or \
3372 selfA._toStore("CurrentOptimum") or \
3373 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3374 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3375 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3376 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3377 if selfA._toStore("IndexOfOptimum"):
3378 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3379 if selfA._toStore("CurrentOptimum"):
3380 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3381 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3382 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3383 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3384 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3385 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3386 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3389 def GradientOfCostFunction(x):
3390 _X = numpy.asmatrix(numpy.ravel( x )).T
3391 GradJb = BI * (_X - Xb)
3393 for step in range(duration-1,0,-1):
3394 # Étape de récupération du dernier stockage de l'évolution
3395 _Xn = selfA.DirectCalculation.pop()
3396 # Étape de récupération du dernier stockage de l'innovation
3397 _YmHMX = selfA.DirectInnovation.pop()
3398 # Calcul des adjoints
3399 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3400 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3401 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3402 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3403 # Calcul du gradient par état adjoint
3404 GradJo = GradJo + Ha * (RI * _YmHMX) # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
3405 GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
3406 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3409 # Minimisation de la fonctionnelle
3410 # --------------------------------
3411 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3413 if selfA._parameters["Minimizer"] == "LBFGSB":
3414 if "0.19" <= scipy.version.version <= "1.1.0":
3415 import lbfgsbhlt as optimiseur
3417 import scipy.optimize as optimiseur
3418 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3419 func = CostFunction,
3421 fprime = GradientOfCostFunction,
3423 bounds = selfA._parameters["Bounds"],
3424 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3425 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3426 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3427 iprint = selfA._parameters["optiprint"],
3429 nfeval = Informations['funcalls']
3430 rc = Informations['warnflag']
3431 elif selfA._parameters["Minimizer"] == "TNC":
3432 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3433 func = CostFunction,
3435 fprime = GradientOfCostFunction,
3437 bounds = selfA._parameters["Bounds"],
3438 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3439 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3440 ftol = selfA._parameters["CostDecrementTolerance"],
3441 messages = selfA._parameters["optmessages"],
3443 elif selfA._parameters["Minimizer"] == "CG":
3444 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3447 fprime = GradientOfCostFunction,
3449 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3450 gtol = selfA._parameters["GradientNormTolerance"],
3451 disp = selfA._parameters["optdisp"],
3454 elif selfA._parameters["Minimizer"] == "NCG":
3455 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3458 fprime = GradientOfCostFunction,
3460 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3461 avextol = selfA._parameters["CostDecrementTolerance"],
3462 disp = selfA._parameters["optdisp"],
3465 elif selfA._parameters["Minimizer"] == "BFGS":
3466 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3469 fprime = GradientOfCostFunction,
3471 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3472 gtol = selfA._parameters["GradientNormTolerance"],
3473 disp = selfA._parameters["optdisp"],
3477 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3479 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3480 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3482 # Correction pour pallier a un bug de TNC sur le retour du Minimum
3483 # ----------------------------------------------------------------
3484 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3485 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3487 # Obtention de l'analyse
3488 # ----------------------
3491 selfA.StoredVariables["Analysis"].store( Xa )
3493 # Calculs et/ou stockages supplémentaires
3494 # ---------------------------------------
3495 if selfA._toStore("BMA"):
3496 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3500 # ==============================================================================
3501 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3503 Standard Kalman Filter
3505 if selfA._parameters["EstimationOf"] == "Parameters":
3506 selfA._parameters["StoreInternalVariables"] = True
3510 Ht = HO["Tangent"].asMatrix(Xb)
3511 Ha = HO["Adjoint"].asMatrix(Xb)
3513 if selfA._parameters["EstimationOf"] == "State":
3514 Mt = EM["Tangent"].asMatrix(Xb)
3515 Ma = EM["Adjoint"].asMatrix(Xb)
3517 if CM is not None and "Tangent" in CM and U is not None:
3518 Cm = CM["Tangent"].asMatrix(Xb)
3522 # Durée d'observation et tailles
3523 if hasattr(Y,"stepnumber"):
3524 duration = Y.stepnumber()
3525 __p = numpy.cumprod(Y.shape())[-1]
3528 __p = numpy.array(Y).size
3530 # Précalcul des inversions de B et R
3531 if selfA._parameters["StoreInternalVariables"] \
3532 or selfA._toStore("CostFunctionJ") \
3533 or selfA._toStore("CostFunctionJb") \
3534 or selfA._toStore("CostFunctionJo") \
3535 or selfA._toStore("CurrentOptimum") \
3536 or selfA._toStore("APosterioriCovariance"):
3542 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3545 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3546 selfA.StoredVariables["Analysis"].store( Xb )
3547 if selfA._toStore("APosterioriCovariance"):
3548 if hasattr(B,"asfullmatrix"):
3549 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3551 selfA.StoredVariables["APosterioriCovariance"].store( B )
3552 selfA._setInternalState("seed", numpy.random.get_state())
3553 elif selfA._parameters["nextStep"]:
3554 Xn = selfA._getInternalState("Xn")
3555 Pn = selfA._getInternalState("Pn")
3557 if selfA._parameters["EstimationOf"] == "Parameters":
3559 previousJMinimum = numpy.finfo(float).max
3561 for step in range(duration-1):
3562 if hasattr(Y,"store"):
3563 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3565 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3568 if hasattr(U,"store") and len(U)>1:
3569 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3570 elif hasattr(U,"store") and len(U)==1:
3571 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3573 Un = numpy.asmatrix(numpy.ravel( U )).T
3577 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3578 Xn_predicted = Mt * Xn
3579 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3580 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3581 Xn_predicted = Xn_predicted + Cm * Un
3582 Pn_predicted = Q + Mt * (Pn * Ma)
3583 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3584 # --- > Par principe, M = Id, Q = 0
3588 if selfA._parameters["EstimationOf"] == "State":
3589 HX_predicted = Ht * Xn_predicted
3590 _Innovation = Ynpu - HX_predicted
3591 elif selfA._parameters["EstimationOf"] == "Parameters":
3592 HX_predicted = Ht * Xn_predicted
3593 _Innovation = Ynpu - HX_predicted
3594 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3595 _Innovation = _Innovation - Cm * Un
3597 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3598 Xn = Xn_predicted + Kn * _Innovation
3599 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3602 #--------------------------
3603 selfA._setInternalState("Xn", Xn)
3604 selfA._setInternalState("Pn", Pn)
3605 #--------------------------
3607 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3608 # ---> avec analysis
3609 selfA.StoredVariables["Analysis"].store( Xa )
3610 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3611 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3612 if selfA._toStore("InnovationAtCurrentAnalysis"):
3613 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3614 # ---> avec current state
3615 if selfA._parameters["StoreInternalVariables"] \
3616 or selfA._toStore("CurrentState"):
3617 selfA.StoredVariables["CurrentState"].store( Xn )
3618 if selfA._toStore("ForecastState"):
3619 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3620 if selfA._toStore("ForecastCovariance"):
3621 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3622 if selfA._toStore("BMA"):
3623 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3624 if selfA._toStore("InnovationAtCurrentState"):
3625 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3626 if selfA._toStore("SimulatedObservationAtCurrentState") \
3627 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3628 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3630 if selfA._parameters["StoreInternalVariables"] \
3631 or selfA._toStore("CostFunctionJ") \
3632 or selfA._toStore("CostFunctionJb") \
3633 or selfA._toStore("CostFunctionJo") \
3634 or selfA._toStore("CurrentOptimum") \
3635 or selfA._toStore("APosterioriCovariance"):
3636 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3637 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3639 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3640 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3641 selfA.StoredVariables["CostFunctionJ" ].store( J )
3643 if selfA._toStore("IndexOfOptimum") \
3644 or selfA._toStore("CurrentOptimum") \
3645 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3646 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3647 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3648 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3649 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3650 if selfA._toStore("IndexOfOptimum"):
3651 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3652 if selfA._toStore("CurrentOptimum"):
3653 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3654 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3655 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3656 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3657 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3658 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3659 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3660 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3661 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3662 if selfA._toStore("APosterioriCovariance"):
3663 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3664 if selfA._parameters["EstimationOf"] == "Parameters" \
3665 and J < previousJMinimum:
3666 previousJMinimum = J
3668 if selfA._toStore("APosterioriCovariance"):
3669 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3671 # Stockage final supplémentaire de l'optimum en estimation de paramètres
3672 # ----------------------------------------------------------------------
3673 if selfA._parameters["EstimationOf"] == "Parameters":
3674 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3675 selfA.StoredVariables["Analysis"].store( XaMin )
3676 if selfA._toStore("APosterioriCovariance"):
3677 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3678 if selfA._toStore("BMA"):
3679 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3683 # ==============================================================================
3684 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3686 Constrained Unscented Kalman Filter
3688 if selfA._parameters["EstimationOf"] == "Parameters":
3689 selfA._parameters["StoreInternalVariables"] = True
3690 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3693 Alpha = selfA._parameters["Alpha"]
3694 Beta = selfA._parameters["Beta"]
3695 if selfA._parameters["Kappa"] == 0:
3696 if selfA._parameters["EstimationOf"] == "State":
3698 elif selfA._parameters["EstimationOf"] == "Parameters":
3701 Kappa = selfA._parameters["Kappa"]
3702 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3703 Gamma = math.sqrt( L + Lambda )
3707 for i in range(2*L):
3708 Ww.append( 1. / (2.*(L + Lambda)) )
3710 Wm = numpy.array( Ww )
3711 Wm[0] = Lambda / (L + Lambda)
3712 Wc = numpy.array( Ww )
3713 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3716 Hm = HO["Direct"].appliedControledFormTo
3718 if selfA._parameters["EstimationOf"] == "State":
3719 Mm = EM["Direct"].appliedControledFormTo
3721 if CM is not None and "Tangent" in CM and U is not None:
3722 Cm = CM["Tangent"].asMatrix(Xb)
3726 # Durée d'observation et tailles
3727 if hasattr(Y,"stepnumber"):
3728 duration = Y.stepnumber()
3729 __p = numpy.cumprod(Y.shape())[-1]
3732 __p = numpy.array(Y).size
3734 # Précalcul des inversions de B et R
3735 if selfA._parameters["StoreInternalVariables"] \
3736 or selfA._toStore("CostFunctionJ") \
3737 or selfA._toStore("CostFunctionJb") \
3738 or selfA._toStore("CostFunctionJo") \
3739 or selfA._toStore("CurrentOptimum") \
3740 or selfA._toStore("APosterioriCovariance"):
3746 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3748 if hasattr(B,"asfullmatrix"):
3749 Pn = B.asfullmatrix(__n)
3752 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3753 selfA.StoredVariables["Analysis"].store( Xb )
3754 if selfA._toStore("APosterioriCovariance"):
3755 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3756 elif selfA._parameters["nextStep"]:
3757 Xn = selfA._getInternalState("Xn")
3758 Pn = selfA._getInternalState("Pn")
3760 if selfA._parameters["EstimationOf"] == "Parameters":
3762 previousJMinimum = numpy.finfo(float).max
3764 for step in range(duration-1):
3765 if hasattr(Y,"store"):
3766 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3768 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3771 if hasattr(U,"store") and len(U)>1:
3772 Un = numpy.ravel( U[step] ).reshape((-1,1))
3773 elif hasattr(U,"store") and len(U)==1:
3774 Un = numpy.ravel( U[0] ).reshape((-1,1))
3776 Un = numpy.ravel( U ).reshape((-1,1))
3780 Pndemi = numpy.linalg.cholesky(Pn)
3781 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3782 nbSpts = 2*Xn.size+1
3784 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3785 for point in range(nbSpts):
3786 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
3789 for point in range(nbSpts):
3790 if selfA._parameters["EstimationOf"] == "State":
3791 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3792 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3793 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3794 XEtnnpi = XEtnnpi + Cm * Un
3795 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3796 XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
3797 elif selfA._parameters["EstimationOf"] == "Parameters":
3798 # --- > Par principe, M = Id, Q = 0
3799 XEtnnpi = Xnp[:,point]
3800 XEtnnp.append( XEtnnpi )
3801 XEtnnp = numpy.hstack( XEtnnp )
3803 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3805 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3806 Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
3808 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
3809 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3810 for point in range(nbSpts):
3811 Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3813 if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3814 Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3816 Pnmdemi = numpy.linalg.cholesky(Pnm)
3818 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3820 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3821 for point in range(nbSpts):
3822 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
3825 for point in range(nbSpts):
3826 if selfA._parameters["EstimationOf"] == "State":
3827 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3828 elif selfA._parameters["EstimationOf"] == "Parameters":
3829 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3830 Ynnp.append( Ynnpi )
3831 Ynnp = numpy.hstack( Ynnp )
3833 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3837 for point in range(nbSpts):
3838 Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3839 Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3841 _Innovation = Ynpu - Yncm
3842 if selfA._parameters["EstimationOf"] == "Parameters":
3843 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3844 _Innovation = _Innovation - Cm * Un
3847 Xn = Xncm + Kn * _Innovation
3848 Pn = Pnm - Kn * Pyyn * Kn.T
3850 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3851 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
3854 #--------------------------
3855 selfA._setInternalState("Xn", Xn)
3856 selfA._setInternalState("Pn", Pn)
3857 #--------------------------
3859 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3860 # ---> avec analysis
3861 selfA.StoredVariables["Analysis"].store( Xa )
3862 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3863 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
3864 if selfA._toStore("InnovationAtCurrentAnalysis"):
3865 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3866 # ---> avec current state
3867 if selfA._parameters["StoreInternalVariables"] \
3868 or selfA._toStore("CurrentState"):
3869 selfA.StoredVariables["CurrentState"].store( Xn )
3870 if selfA._toStore("ForecastState"):
3871 selfA.StoredVariables["ForecastState"].store( Xncm )
3872 if selfA._toStore("ForecastCovariance"):
3873 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
3874 if selfA._toStore("BMA"):
3875 selfA.StoredVariables["BMA"].store( Xncm - Xa )
3876 if selfA._toStore("InnovationAtCurrentState"):
3877 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3878 if selfA._toStore("SimulatedObservationAtCurrentState") \
3879 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3880 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
3882 if selfA._parameters["StoreInternalVariables"] \
3883 or selfA._toStore("CostFunctionJ") \
3884 or selfA._toStore("CostFunctionJb") \
3885 or selfA._toStore("CostFunctionJo") \
3886 or selfA._toStore("CurrentOptimum") \
3887 or selfA._toStore("APosterioriCovariance"):
3888 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3889 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3891 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3892 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3893 selfA.StoredVariables["CostFunctionJ" ].store( J )
3895 if selfA._toStore("IndexOfOptimum") \
3896 or selfA._toStore("CurrentOptimum") \
3897 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3898 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3899 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3900 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3901 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3902 if selfA._toStore("IndexOfOptimum"):
3903 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3904 if selfA._toStore("CurrentOptimum"):
3905 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3906 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3907 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3908 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3909 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3910 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3911 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3912 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3913 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3914 if selfA._toStore("APosterioriCovariance"):
3915 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3916 if selfA._parameters["EstimationOf"] == "Parameters" \
3917 and J < previousJMinimum:
3918 previousJMinimum = J
3920 if selfA._toStore("APosterioriCovariance"):
3921 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3923 # Stockage final supplémentaire de l'optimum en estimation de paramètres
3924 # ----------------------------------------------------------------------
3925 if selfA._parameters["EstimationOf"] == "Parameters":
3926 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3927 selfA.StoredVariables["Analysis"].store( XaMin )
3928 if selfA._toStore("APosterioriCovariance"):
3929 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3930 if selfA._toStore("BMA"):
3931 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3935 # ==============================================================================
3936 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3938 Unscented Kalman Filter
3940 if selfA._parameters["EstimationOf"] == "Parameters":
3941 selfA._parameters["StoreInternalVariables"] = True
3944 Alpha = selfA._parameters["Alpha"]
3945 Beta = selfA._parameters["Beta"]
3946 if selfA._parameters["Kappa"] == 0:
3947 if selfA._parameters["EstimationOf"] == "State":
3949 elif selfA._parameters["EstimationOf"] == "Parameters":
3952 Kappa = selfA._parameters["Kappa"]
3953 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3954 Gamma = math.sqrt( L + Lambda )
3958 for i in range(2*L):
3959 Ww.append( 1. / (2.*(L + Lambda)) )
3961 Wm = numpy.array( Ww )
3962 Wm[0] = Lambda / (L + Lambda)
3963 Wc = numpy.array( Ww )
3964 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3967 Hm = HO["Direct"].appliedControledFormTo
3969 if selfA._parameters["EstimationOf"] == "State":
3970 Mm = EM["Direct"].appliedControledFormTo
3972 if CM is not None and "Tangent" in CM and U is not None:
3973 Cm = CM["Tangent"].asMatrix(Xb)
3977 # Durée d'observation et tailles
3978 if hasattr(Y,"stepnumber"):
3979 duration = Y.stepnumber()
3980 __p = numpy.cumprod(Y.shape())[-1]
3983 __p = numpy.array(Y).size
3985 # Précalcul des inversions de B et R
3986 if selfA._parameters["StoreInternalVariables"] \
3987 or selfA._toStore("CostFunctionJ") \
3988 or selfA._toStore("CostFunctionJb") \
3989 or selfA._toStore("CostFunctionJo") \
3990 or selfA._toStore("CurrentOptimum") \
3991 or selfA._toStore("APosterioriCovariance"):
3997 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3999 if hasattr(B,"asfullmatrix"):
4000 Pn = B.asfullmatrix(__n)
4003 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4004 selfA.StoredVariables["Analysis"].store( Xb )
4005 if selfA._toStore("APosterioriCovariance"):
4006 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4007 elif selfA._parameters["nextStep"]:
4008 Xn = selfA._getInternalState("Xn")
4009 Pn = selfA._getInternalState("Pn")
4011 if selfA._parameters["EstimationOf"] == "Parameters":
4013 previousJMinimum = numpy.finfo(float).max
4015 for step in range(duration-1):
4016 if hasattr(Y,"store"):
4017 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4019 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4022 if hasattr(U,"store") and len(U)>1:
4023 Un = numpy.ravel( U[step] ).reshape((-1,1))
4024 elif hasattr(U,"store") and len(U)==1:
4025 Un = numpy.ravel( U[0] ).reshape((-1,1))
4027 Un = numpy.ravel( U ).reshape((-1,1))
4031 Pndemi = numpy.linalg.cholesky(Pn)
4032 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4033 nbSpts = 2*Xn.size+1
4036 for point in range(nbSpts):
4037 if selfA._parameters["EstimationOf"] == "State":
4038 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
4039 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
4040 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4041 XEtnnpi = XEtnnpi + Cm * Un
4042 elif selfA._parameters["EstimationOf"] == "Parameters":
4043 # --- > Par principe, M = Id, Q = 0
4044 XEtnnpi = Xnp[:,point]
4045 XEtnnp.append( XEtnnpi )
4046 XEtnnp = numpy.hstack( XEtnnp )
4048 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
4050 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4051 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4052 for point in range(nbSpts):
4053 Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
4055 Pnmdemi = numpy.linalg.cholesky(Pnm)
4057 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
4060 for point in range(nbSpts):
4061 if selfA._parameters["EstimationOf"] == "State":
4062 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
4063 elif selfA._parameters["EstimationOf"] == "Parameters":
4064 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
4065 Ynnp.append( Ynnpi )
4066 Ynnp = numpy.hstack( Ynnp )
4068 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
4072 for point in range(nbSpts):
4073 Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
4074 Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
4076 _Innovation = Ynpu - Yncm
4077 if selfA._parameters["EstimationOf"] == "Parameters":
4078 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
4079 _Innovation = _Innovation - Cm * Un
4082 Xn = Xncm + Kn * _Innovation
4083 Pn = Pnm - Kn * Pyyn * Kn.T
4086 #--------------------------
4087 selfA._setInternalState("Xn", Xn)
4088 selfA._setInternalState("Pn", Pn)
4089 #--------------------------
4091 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4092 # ---> avec analysis
4093 selfA.StoredVariables["Analysis"].store( Xa )
4094 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4095 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4096 if selfA._toStore("InnovationAtCurrentAnalysis"):
4097 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4098 # ---> avec current state
4099 if selfA._parameters["StoreInternalVariables"] \
4100 or selfA._toStore("CurrentState"):
4101 selfA.StoredVariables["CurrentState"].store( Xn )
4102 if selfA._toStore("ForecastState"):
4103 selfA.StoredVariables["ForecastState"].store( Xncm )
4104 if selfA._toStore("ForecastCovariance"):
4105 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4106 if selfA._toStore("BMA"):
4107 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4108 if selfA._toStore("InnovationAtCurrentState"):
4109 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4110 if selfA._toStore("SimulatedObservationAtCurrentState") \
4111 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4112 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4114 if selfA._parameters["StoreInternalVariables"] \
4115 or selfA._toStore("CostFunctionJ") \
4116 or selfA._toStore("CostFunctionJb") \
4117 or selfA._toStore("CostFunctionJo") \
4118 or selfA._toStore("CurrentOptimum") \
4119 or selfA._toStore("APosterioriCovariance"):
4120 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4121 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4123 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4124 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4125 selfA.StoredVariables["CostFunctionJ" ].store( J )
4127 if selfA._toStore("IndexOfOptimum") \
4128 or selfA._toStore("CurrentOptimum") \
4129 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4130 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4131 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4132 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4133 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4134 if selfA._toStore("IndexOfOptimum"):
4135 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4136 if selfA._toStore("CurrentOptimum"):
4137 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4138 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4139 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4140 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4141 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4142 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4143 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4144 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4145 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4146 if selfA._toStore("APosterioriCovariance"):
4147 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4148 if selfA._parameters["EstimationOf"] == "Parameters" \
4149 and J < previousJMinimum:
4150 previousJMinimum = J
4152 if selfA._toStore("APosterioriCovariance"):
4153 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4155 # Stockage final supplémentaire de l'optimum en estimation de paramètres
4156 # ----------------------------------------------------------------------
4157 if selfA._parameters["EstimationOf"] == "Parameters":
4158 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4159 selfA.StoredVariables["Analysis"].store( XaMin )
4160 if selfA._toStore("APosterioriCovariance"):
4161 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4162 if selfA._toStore("BMA"):
4163 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4167 # ==============================================================================
4168 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4170 3DVAR variational analysis with no inversion of B
4177 Hm = HO["Direct"].appliedTo
4178 Ha = HO["Adjoint"].appliedInXTo
4180 # Précalcul des inversions de B et R
4184 # Point de démarrage de l'optimisation
4185 Xini = numpy.zeros(Xb.shape)
4187 # Définition de la fonction-coût
4188 # ------------------------------
4189 def CostFunction(v):
4190 _V = numpy.asmatrix(numpy.ravel( v )).T
4192 if selfA._parameters["StoreInternalVariables"] or \
4193 selfA._toStore("CurrentState") or \
4194 selfA._toStore("CurrentOptimum"):
4195 selfA.StoredVariables["CurrentState"].store( _X )
4197 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4198 _Innovation = Y - _HX
4199 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4200 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4201 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4202 if selfA._toStore("InnovationAtCurrentState"):
4203 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4205 Jb = float( 0.5 * _V.T * BT * _V )
4206 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4209 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4210 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4211 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4212 selfA.StoredVariables["CostFunctionJ" ].store( J )
4213 if selfA._toStore("IndexOfOptimum") or \
4214 selfA._toStore("CurrentOptimum") or \
4215 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4216 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4217 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4218 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4219 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4220 if selfA._toStore("IndexOfOptimum"):
4221 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4222 if selfA._toStore("CurrentOptimum"):
4223 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4224 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4225 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4226 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4227 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4228 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4229 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4230 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4231 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4234 def GradientOfCostFunction(v):
4235 _V = numpy.asmatrix(numpy.ravel( v )).T
4238 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4240 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4241 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4244 # Minimisation de la fonctionnelle
4245 # --------------------------------
4246 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4248 if selfA._parameters["Minimizer"] == "LBFGSB":
4249 if "0.19" <= scipy.version.version <= "1.1.0":
4250 import lbfgsbhlt as optimiseur
4252 import scipy.optimize as optimiseur
4253 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4254 func = CostFunction,
4256 fprime = GradientOfCostFunction,
4258 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4259 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4260 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4261 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4262 iprint = selfA._parameters["optiprint"],
4264 nfeval = Informations['funcalls']
4265 rc = Informations['warnflag']
4266 elif selfA._parameters["Minimizer"] == "TNC":
4267 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4268 func = CostFunction,
4270 fprime = GradientOfCostFunction,
4272 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4273 maxfun = selfA._parameters["MaximumNumberOfSteps"],
4274 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4275 ftol = selfA._parameters["CostDecrementTolerance"],
4276 messages = selfA._parameters["optmessages"],
4278 elif selfA._parameters["Minimizer"] == "CG":
4279 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4282 fprime = GradientOfCostFunction,
4284 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4285 gtol = selfA._parameters["GradientNormTolerance"],
4286 disp = selfA._parameters["optdisp"],
4289 elif selfA._parameters["Minimizer"] == "NCG":
4290 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4293 fprime = GradientOfCostFunction,
4295 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4296 avextol = selfA._parameters["CostDecrementTolerance"],
4297 disp = selfA._parameters["optdisp"],
4300 elif selfA._parameters["Minimizer"] == "BFGS":
4301 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4304 fprime = GradientOfCostFunction,
4306 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4307 gtol = selfA._parameters["GradientNormTolerance"],
4308 disp = selfA._parameters["optdisp"],
4312 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4314 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4315 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4317 # Correction pour pallier a un bug de TNC sur le retour du Minimum
4318 # ----------------------------------------------------------------
4319 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4320 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4321 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
4323 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
4325 # Obtention de l'analyse
4326 # ----------------------
4329 selfA.StoredVariables["Analysis"].store( Xa )
4331 if selfA._toStore("OMA") or \
4332 selfA._toStore("SigmaObs2") or \
4333 selfA._toStore("SimulationQuantiles") or \
4334 selfA._toStore("SimulatedObservationAtOptimum"):
4335 if selfA._toStore("SimulatedObservationAtCurrentState"):
4336 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4337 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4338 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4342 # Calcul de la covariance d'analyse
4343 # ---------------------------------
4344 if selfA._toStore("APosterioriCovariance") or \
4345 selfA._toStore("SimulationQuantiles") or \
4346 selfA._toStore("JacobianMatrixAtOptimum") or \
4347 selfA._toStore("KalmanGainAtOptimum"):
4348 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4349 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4350 if selfA._toStore("APosterioriCovariance") or \
4351 selfA._toStore("SimulationQuantiles") or \
4352 selfA._toStore("KalmanGainAtOptimum"):
4353 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4354 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4355 if selfA._toStore("APosterioriCovariance") or \
4356 selfA._toStore("SimulationQuantiles"):
4361 _ee = numpy.matrix(numpy.zeros(nb)).T
4363 _HtEE = numpy.dot(HtM,_ee)
4364 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
4365 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
4366 HessienneI = numpy.matrix( HessienneI )
4368 if min(A.shape) != max(A.shape):
4369 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
4370 if (numpy.diag(A) < 0).any():
4371 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
4372 if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
4374 L = numpy.linalg.cholesky( A )
4376 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
4377 if selfA._toStore("APosterioriCovariance"):
4378 selfA.StoredVariables["APosterioriCovariance"].store( A )
4379 if selfA._toStore("JacobianMatrixAtOptimum"):
4380 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4381 if selfA._toStore("KalmanGainAtOptimum"):
4382 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4383 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4384 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4386 # Calculs et/ou stockages supplémentaires
4387 # ---------------------------------------
4388 if selfA._toStore("Innovation") or \
4389 selfA._toStore("SigmaObs2") or \
4390 selfA._toStore("MahalanobisConsistency") or \
4391 selfA._toStore("OMB"):
4393 if selfA._toStore("Innovation"):
4394 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
4395 if selfA._toStore("BMA"):
4396 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4397 if selfA._toStore("OMA"):
4398 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4399 if selfA._toStore("OMB"):
4400 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
4401 if selfA._toStore("SigmaObs2"):
4402 TraceR = R.trace(Y.size)
4403 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
4404 if selfA._toStore("MahalanobisConsistency"):
4405 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4406 if selfA._toStore("SimulationQuantiles"):
4407 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4408 if selfA._toStore("SimulatedObservationAtBackground"):
4409 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
4410 if selfA._toStore("SimulatedObservationAtOptimum"):
4411 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
4415 # ==============================================================================
4416 if __name__ == "__main__":
4417 print('\n AUTODIAGNOSTIC\n')