# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
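
# Usage sketch (illustration only, not executed by the module): ExecuteFunction
# is meant to be mapped over a multiprocessing pool, each job being a triplet
# (point, extra arguments, importable description of the user function). The
# names "usermodule" and "userfunction" and the path below are hypothetical:
#
#   funcrepr = {
#       "__userFunction__path" : "/some/directory",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "userfunction",
#       }
#   _jobs = [ (X, None, funcrepr) for X in listOfPoints ]
#   results = multiprocessing.Pool().map( ExecuteFunction, _jobs )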
# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface for defining approximated operators. When
    an object is created from a function "Function", one obtains an object
    with three methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    either by the multiplicative increment "increment" (1% by default), or by
    the fixed increment "dX" which is then multiplied by "increment" (hence in
    %), and centered finite differences are used if the boolean "centeredDF"
    is true.
    """
    def __init__(self,
            name     = "FDApproximation",
            Function = None,
            centeredDF = False,
            increment  = 0.01,
            dX         = None,
            extraArguments = None,
            avoidingRedundancy = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy = -1,
            mpEnabled  = False,
            mpWorkers  = None,
            mfEnabled  = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations : %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection : %.2e"%self.__tolerBP)

    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac

    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Direct computation using the provided function.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA Computing DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX

    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, given as a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is both added to
           and subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by twice the increment dX[i]
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by the
           increment dX[i]
        4/ Each result, component by component, becomes a column of the Jacobian
        """
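        # Sketch of the formulas implemented below, with e_i the i-th
        # canonical basis vector and dXi the i-th increment:
        #   centered     : J[:,i] ~ ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        #   non-centered : J[:,i] ~ ( H(X + dXi*e_i) - H(X) ) / dXi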
        logging.debug("FDA Starting Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Computing Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Computing Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of Jacobian computation")
        #
        return _Jacobienne

    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Tangent computation using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else: return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X, applied to dX
            # -------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else: return _HtX.A1

    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Adjoint computation using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else: return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X, applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else: return _HaY.A1
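
# Usage sketch (illustration only, not executed by the module): building an
# FDApproximation on a hypothetical user function "userfunction", and checking
# tangent/adjoint consistency with the classical dot-product test
# <Ht.dX, Y> == <dX, Ha.Y>, which holds up to round-off for the FD Jacobian.
# X, dX and Y are hypothetical vectors of appropriate sizes:
#
#   FDA  = FDApproximation( Function = userfunction )
#   HX   = FDA.DirectOperator( X )
#   HtdX = FDA.TangentOperator( (X, dX) )
#   HaY  = FDA.AdjointOperator( (X, Y) )
#   assert abs( numpy.dot( numpy.ravel(HtdX), numpy.ravel(Y) )
#             - numpy.dot( numpy.ravel(dX), numpy.ravel(HaY) ) ) < 1.e-10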
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states around the center"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
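
# Usage sketch (illustration only): drawing an ensemble of 100 states of size
# 3 around a given center, with unit covariance. The ensemble is stored one
# member per column:
#
#   E = EnsembleOfCenteredPerturbations( [0., 1., 2.], numpy.identity(3), 100 )
#   # E.shape == (3, 100), and E.mean(axis=1) is close to the center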
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on the background"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB, and consistent with the code of PS with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble

# ==============================================================================
def EnsembleMean( __Ensemble ):
    "Returns the empirical mean of an ensemble"
    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))

# ==============================================================================
def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
    "Returns the centered anomalies of an ensemble"
    if __OptMean is None:
        __Em = EnsembleMean( __Ensemble )
    else:
        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
    #
    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
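
# Usage sketch (illustration only): with the default normalisation of 1, an
# ensemble is recovered exactly as "mean + anomalies":
#
#   E  = EnsembleOfCenteredPerturbations( [0., 1., 2.], numpy.identity(3), 100 )
#   Em = EnsembleMean( E )
#   Ea = EnsembleOfAnomalies( E )
#   # numpy.allclose( Em + Ea, E ) is True, and Ea.mean(axis=1) is ~0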
# ==============================================================================
def EnsembleErrorCovariance( __Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble error covariance"
    if __quick:
        # Quick covariance, but rarely positive definite
        __Covariance = numpy.cov( __Ensemble )
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray( __Ensemble ).shape
        __Anomalies = EnsembleOfAnomalies( __Ensemble )
        # Empirical estimation
        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
        # Ensures symmetry
        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
        # Ensures positivity
        __epsilon = mpr*numpy.trace( __Covariance )
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
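
# Usage sketch (illustration only): for a well-conditioned ensemble E, the
# robust estimate only differs from the quick one by the symmetrisation and by
# the small diagonal regularisation of size mpr*trace, so both are expected to
# agree within a loose tolerance:
#
#   P1 = EnsembleErrorCovariance( E, __quick = True  )  # numpy.cov
#   P2 = EnsembleErrorCovariance( E, __quick = False )  # regularised estimate
#   # numpy.allclose( P1, P2, atol = 1.e-10 ) is then expected to hold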
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a null or nearly null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a null or nearly null covariance
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a null or nearly null covariance
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a null or nearly null covariance
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing that this case is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
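
# Usage sketch (illustration only): perturbing each member of an ensemble E of
# shape (n, m), with a plain numpy identity covariance (this hits the
# numpy.ndarray case above) and a fixed seed for reproducibility:
#
#   E = EnsemblePerturbationWithGivenCovariance( E, numpy.identity(E.shape[0]), __Seed = 1000 )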
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType = None,
        InflationFactor = None,
        BackgroundCov = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
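
# Usage sketch (illustration only): multiplicative inflation of the anomalies
# of an ensemble E of shape (n, m) by 5%. The ensemble mean is left unchanged
# and the spread around it is multiplied by 1.05:
#
#   E = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.05 )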
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        LBounds = ForceNumericBounds( LBounds )
    _Xa = numpy.ravel(Xa)
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
            dYr = HtM @ dXr
            Yr = HXa.reshape((-1,1)) + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.random.multivariate_normal(_Xa,A)
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
            Yr = numpy.ravel( Hm( Xr ) )
        else:
            raise ValueError("Quantile simulations can only be Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr.reshape((-1,1))
            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
        else:
            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
        else: YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
    if YQ is not None: # Non-empty list of quantiles
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
    #
    return 0

# ==============================================================================
def ForceNumericBounds( __Bounds ):
    "Forces the bounds to be numeric values, unless globally None"
    # Keeps a default value of None when there are no bounds
    if __Bounds is None: return None
    # Converts every individual None bound to +/- infinity
    __Bounds = numpy.asarray( __Bounds, dtype=float )
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
    __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max
    return __Bounds
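
# Usage sketch (illustration only): individual None bounds are converted to
# +/- the largest representable float:
#
#   ForceNumericBounds( [[0., None], [None, 1.]] )
#   # -> array([[0., 1.79769313e+308], [-1.79769313e+308, 1.]])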
# ==============================================================================
def RecentredBounds( __Bounds, __Center):
    "Recenters the bounds around 0, unless globally None"
    # Keeps a default value of None when there are no bounds
    if __Bounds is None: return None
    # Recenters the numeric bound values
    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))

# ==============================================================================
def ApplyBounds( __Vector, __Bounds, __newClip = True):
    "Applies numeric bounds to a point"
    # Keeps a default value when there are no bounds
    if __Bounds is None: return __Vector
    #
    if not isinstance(__Vector, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of vector data")
    if not isinstance(__Bounds, numpy.ndarray): # Is an array
        raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
        raise ValueError("Incorrect bounds number to be applied for this vector")
    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
        raise ValueError("Incorrectly shaped bounds data")
    #
    if __newClip:
        __Vector = __Vector.clip(
            __Bounds[:,0].reshape(__Vector.shape),
            __Bounds[:,1].reshape(__Vector.shape),
            )
    else:
        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
        __Vector = numpy.asarray(__Vector)
    #
    return __Vector
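
# Usage sketch (illustration only): clipping a state componentwise inside
# [0, 1] bounds:
#
#   x = ApplyBounds( numpy.array([-0.5, 0.5, 1.5]), numpy.array([[0., 1.]]*3) )
#   # -> array([0. , 0.5, 1. ])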
# ==============================================================================
def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Constrained Extended Kalman Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = Xb
        Pn = B
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
        Pn = selfA._getInternalState("Pn")
    #
    if selfA._parameters["EstimationOf"] == "Parameters":
        XaMin = Xn
        previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
        #
        if selfA._parameters["EstimationOf"] == "State":
            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.ravel( U[step] ).reshape((-1,1))
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.ravel( U[0] ).reshape((-1,1))
            else:
                Un = numpy.ravel( U ).reshape((-1,1))
        else:
            Un = None
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
            Pn_predicted = Q + Mt * Pn * Ma
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            Pn_predicted = Pn
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
        #
        if selfA._parameters["EstimationOf"] == "State":
            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
        elif selfA._parameters["EstimationOf"] == "Parameters":
            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
            _Innovation = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in H, it is counted twice!
                _Innovation = _Innovation - Cm * Un
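        #
        # Analysis step: Kalman gain Kn = Pf Ha (R + Ht Pf Ha)^-1, state update
        # Xa = Xf + Kn * innovation, and covariance update Pa = (I - Kn Ht) Pf,
        # written below as Pa = Pf - Kn Ht Pf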
        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
        Xn = Xn_predicted + Kn * _Innovation
        Pn = Pn_predicted - Kn * Ht * Pn_predicted
        #
        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
        #
        Xa = Xn # Pointers
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("Pn", Pn)
        #--------------------------
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    EnKS (Ensemble Kalman Smoother)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precomputation of the inverses of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
    #
    # Initial direct computation (memorization is preferred over recomputation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi @ ( Ynpu - vZm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Preservation of the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses that were only partially updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"):
                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
            else:
                selfA.StoredVariables["APosterioriCovariance"].store( B )
        selfA._setInternalState("seed", numpy.random.get_state())
    elif selfA._parameters["nextStep"]:
        Xn = selfA._getInternalState("Xn")
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        numpy.random.set_state(selfA._getInternalState("seed"))
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also contained in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = EMX = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
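        #
        # The analysis below works in the __m-dimensional ensemble space: with
        # S = R^(-1/2) EaHX / sqrt(m-1) and d = R^(-1/2) (y - Hfm), the mean
        # weights are w = (I + S^T S)^-1 S^T d, and the square-root transform
        # T^(1/2) = (I + S^T S)^(-1/2) rotates the normalized anomalies.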
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS = RIdemi * EaHX / math.sqrt(__m-1)
            mS = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi * ( Ynpu - Hfm )
            mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.identity(__m)
            #
            EaX = EaX / math.sqrt(__m-1)
            Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        selfA._setInternalState("Xn", Xn)
        selfA._setInternalState("seed", numpy.random.get_state())
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("ForecastCovariance"):
            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
1456 # ==============================================================================
1457 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1459 Extended Kalman Filter
1461 if selfA._parameters["EstimationOf"] == "Parameters":
1462 selfA._parameters["StoreInternalVariables"] = True
1465 H = HO["Direct"].appliedControledFormTo
1467 if selfA._parameters["EstimationOf"] == "State":
1468 M = EM["Direct"].appliedControledFormTo
1470 if CM is not None and "Tangent" in CM and U is not None:
1471 Cm = CM["Tangent"].asMatrix(Xb)
1475 # Durée d'observation et tailles
1476 if hasattr(Y,"stepnumber"):
1477 duration = Y.stepnumber()
1478 __p = numpy.cumprod(Y.shape())[-1]
1481 __p = numpy.array(Y).size
1483 # Précalcul des inversions de B et R
1484 if selfA._parameters["StoreInternalVariables"] \
1485 or selfA._toStore("CostFunctionJ") \
1486 or selfA._toStore("CostFunctionJb") \
1487 or selfA._toStore("CostFunctionJo") \
1488 or selfA._toStore("CurrentOptimum") \
1489 or selfA._toStore("APosterioriCovariance"):
1490 BI = B.getI()
1491 RI = R.getI()
1493 __n = Xb.size
1495 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1496 Xn = Xb
1497 Pn = B
1498 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1499 selfA.StoredVariables["Analysis"].store( Xb )
1500 if selfA._toStore("APosterioriCovariance"):
1501 if hasattr(B,"asfullmatrix"):
1502 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1503 else:
1504 selfA.StoredVariables["APosterioriCovariance"].store( B )
1505 selfA._setInternalState("seed", numpy.random.get_state())
1506 elif selfA._parameters["nextStep"]:
1507 Xn = selfA._getInternalState("Xn")
1508 Pn = selfA._getInternalState("Pn")
1510 if selfA._parameters["EstimationOf"] == "Parameters":
1511 XaMin = Xn
1512 previousJMinimum = numpy.finfo(float).max
1514 for step in range(duration-1):
1515 if hasattr(Y,"store"):
1516 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1517 else:
1518 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1520 Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1521 Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1522 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1523 Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1525 if selfA._parameters["EstimationOf"] == "State":
1526 Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1527 Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1528 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1529 Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1532 if hasattr(U,"store") and len(U)>1:
1533 Un = numpy.ravel( U[step] ).reshape((-1,1))
1534 elif hasattr(U,"store") and len(U)==1:
1535 Un = numpy.ravel( U[0] ).reshape((-1,1))
1536 else:
1537 Un = numpy.ravel( U ).reshape((-1,1))
1541 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1542 Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1543 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
1544 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1545 Xn_predicted = Xn_predicted + Cm * Un
1546 Pn_predicted = Q + Mt * Pn * Ma
1547 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1548 # --- > By design, M = Id, Q = 0
1549 Xn_predicted = Xn
1550 Pn_predicted = Pn
1552 if selfA._parameters["EstimationOf"] == "State":
1553 HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1554 _Innovation = Ynpu - HX_predicted
1555 elif selfA._parameters["EstimationOf"] == "Parameters":
1556 HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1557 _Innovation = Ynpu - HX_predicted
1558 if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
1559 _Innovation = _Innovation - Cm * Un
1561 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1562 Xn = Xn_predicted + Kn * _Innovation
1563 Pn = Pn_predicted - Kn * Ht * Pn_predicted
1564 Xa = Xn # Pointer to the analysis
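# The statements above form the standard Kalman analysis update with the
# linearized (tangent Ht / adjoint Ha) observation operator:
#   Kn = Pf Ha (R + Ht Pf Ha)^-1      (gain)
#   Xn = Xf + Kn (y - H(Xf))          (analysis state)
#   Pn = Pf - Kn Ht Pf                (analysis error covariance)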
1566 #--------------------------
1567 selfA._setInternalState("Xn", Xn)
1568 selfA._setInternalState("Pn", Pn)
1569 #--------------------------
1571 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1572 # ---> with the analysis
1573 selfA.StoredVariables["Analysis"].store( Xa )
1574 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1575 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1576 if selfA._toStore("InnovationAtCurrentAnalysis"):
1577 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1578 # ---> with the current state
1579 if selfA._parameters["StoreInternalVariables"] \
1580 or selfA._toStore("CurrentState"):
1581 selfA.StoredVariables["CurrentState"].store( Xn )
1582 if selfA._toStore("ForecastState"):
1583 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1584 if selfA._toStore("ForecastCovariance"):
1585 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1586 if selfA._toStore("BMA"):
1587 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1588 if selfA._toStore("InnovationAtCurrentState"):
1589 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1590 if selfA._toStore("SimulatedObservationAtCurrentState") \
1591 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1592 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1594 if selfA._parameters["StoreInternalVariables"] \
1595 or selfA._toStore("CostFunctionJ") \
1596 or selfA._toStore("CostFunctionJb") \
1597 or selfA._toStore("CostFunctionJo") \
1598 or selfA._toStore("CurrentOptimum") \
1599 or selfA._toStore("APosterioriCovariance"):
1600 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1601 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1602 J  = Jb + Jo
1603 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1604 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1605 selfA.StoredVariables["CostFunctionJ" ].store( J )
1607 if selfA._toStore("IndexOfOptimum") \
1608 or selfA._toStore("CurrentOptimum") \
1609 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1610 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1611 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1612 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1613 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1614 if selfA._toStore("IndexOfOptimum"):
1615 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1616 if selfA._toStore("CurrentOptimum"):
1617 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1618 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1619 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1620 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1621 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1622 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1623 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1624 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1625 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1626 if selfA._toStore("APosterioriCovariance"):
1627 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1628 if selfA._parameters["EstimationOf"] == "Parameters" \
1629 and J < previousJMinimum:
1630 previousJMinimum = J
1631 XaMin = Xa
1632 if selfA._toStore("APosterioriCovariance"):
1633 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1635 # Additional final storage of the optimum, for parameter estimation
1636 # ----------------------------------------------------------------------
1637 if selfA._parameters["EstimationOf"] == "Parameters":
1638 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1639 selfA.StoredVariables["Analysis"].store( XaMin )
1640 if selfA._toStore("APosterioriCovariance"):
1641 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1642 if selfA._toStore("BMA"):
1643 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
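#
# Illustrative sketch (not part of the ADAO API, all names hypothetical):
# one Kalman analysis step on explicit numpy arrays, mirroring the update
# used in "exkf" above, with H the linearized observation matrix.
def _sketch_ekf_analysis_step( xf, Pf, y, H, R ):
    "One Kalman analysis step: returns the analysis state and covariance."
    d  = y - H @ xf                                      # innovation
    K  = Pf @ H.T @ numpy.linalg.inv(R + H @ Pf @ H.T)   # Kalman gain
    xa = xf + K @ d                                      # analysis state
    Pa = Pf - K @ H @ Pf                                 # analysis covariance
    return xa, Pa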
1647 # ==============================================================================
1648 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1649 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1653 if selfA._parameters["EstimationOf"] == "Parameters":
1654 selfA._parameters["StoreInternalVariables"] = True
1657 H = HO["Direct"].appliedControledFormTo
1659 if selfA._parameters["EstimationOf"] == "State":
1660 M = EM["Direct"].appliedControledFormTo
1662 if CM is not None and "Tangent" in CM and U is not None:
1663 Cm = CM["Tangent"].asMatrix(Xb)
1664 else:
1665 Cm = None
1667 # Observation duration and sizes
1668 if hasattr(Y,"stepnumber"):
1669 duration = Y.stepnumber()
1670 __p = numpy.cumprod(Y.shape())[-1]
1671 else:
1672 duration = 2
1673 __p = numpy.array(Y).size
1675 # Precomputation of the B and R inverses
1676 if selfA._parameters["StoreInternalVariables"] \
1677 or selfA._toStore("CostFunctionJ") \
1678 or selfA._toStore("CostFunctionJb") \
1679 or selfA._toStore("CostFunctionJo") \
1680 or selfA._toStore("CurrentOptimum") \
1681 or selfA._toStore("APosterioriCovariance"):
1682 BI = B.getI()
1683 RI = R.getI()
1685 __n = Xb.size
1686 __m = selfA._parameters["NumberOfMembers"]
1688 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1689 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1690 else:                         Pn = B
1691 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1692 selfA.StoredVariables["Analysis"].store( Xb )
1693 if selfA._toStore("APosterioriCovariance"):
1694 if hasattr(B,"asfullmatrix"):
1695 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1696 else:
1697 selfA.StoredVariables["APosterioriCovariance"].store( B )
1698 selfA._setInternalState("seed", numpy.random.get_state())
1699 elif selfA._parameters["nextStep"]:
1700 Xn = selfA._getInternalState("Xn")
1702 previousJMinimum = numpy.finfo(float).max
1704 for step in range(duration-1):
1705 numpy.random.set_state(selfA._getInternalState("seed"))
1706 if hasattr(Y,"store"):
1707 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1708 else:
1709 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1712 if hasattr(U,"store") and len(U)>1:
1713 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1714 elif hasattr(U,"store") and len(U)==1:
1715 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1716 else:
1717 Un = numpy.asmatrix(numpy.ravel( U )).T
1721 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1722 Xn = CovarianceInflation( Xn,
1723 selfA._parameters["InflationType"],
1724 selfA._parameters["InflationFactor"],
1725 )
1727 #--------------------------
1728 if VariantM == "IEnKF12":
1729 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1730 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1731 __j = 0
1732 Deltaw = 1
1733 if not BnotT:
1734 Ta = numpy.identity(__m)
1735 vw = numpy.zeros(__m)
1736 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1737 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1739 if BnotT:
1740 E1 = vx1 + _epsilon * EaX
1741 else:
1742 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1744 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1745 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1747 returnSerieAsArrayMatrix = True )
1748 elif selfA._parameters["EstimationOf"] == "Parameters":
1749 # --- > By design, M = Id
1750 E2 = Xn
1751 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1752 vy1 = H((vx2, Un)).reshape((__p,1))
1754 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1756 returnSerieAsArrayMatrix = True )
1757 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1759 if BnotT:
1760 EaY = (HE2 - vy2) / _epsilon
1761 else:
1762 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1764 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1765 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
1766 Deltaw = - numpy.linalg.solve(mH,GradJ)
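# Gauss-Newton step in ensemble-weight space: "GradJ" and "mH" are the
# gradient and the approximate Hessian of the weight cost function
#   J(w) = 0.5 w'w + 0.5 (y - H(xf + EaX w))' R^-1 (y - H(xf + EaX w)),
# so "Deltaw" is the Newton increment solving mH Deltaw = -GradJ.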
1771 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1775 A2 = EnsembleOfAnomalies( E2 )
1778 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1779 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1781 Xn = vx2 + A2
1782 #--------------------------
1783 else:
1784 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1786 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1787 Xn = CovarianceInflation( Xn,
1788 selfA._parameters["InflationType"],
1789 selfA._parameters["InflationFactor"],
1790 )
1792 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1793 #--------------------------
1794 selfA._setInternalState("Xn", Xn)
1795 selfA._setInternalState("seed", numpy.random.get_state())
1796 #--------------------------
1798 if selfA._parameters["StoreInternalVariables"] \
1799 or selfA._toStore("CostFunctionJ") \
1800 or selfA._toStore("CostFunctionJb") \
1801 or selfA._toStore("CostFunctionJo") \
1802 or selfA._toStore("APosterioriCovariance") \
1803 or selfA._toStore("InnovationAtCurrentAnalysis") \
1804 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1805 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1806 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1807 _Innovation = Ynpu - _HXa
1809 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1810 # ---> with the analysis
1811 selfA.StoredVariables["Analysis"].store( Xa )
1812 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1813 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1814 if selfA._toStore("InnovationAtCurrentAnalysis"):
1815 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1816 # ---> with the current state
1817 if selfA._parameters["StoreInternalVariables"] \
1818 or selfA._toStore("CurrentState"):
1819 selfA.StoredVariables["CurrentState"].store( Xn )
1820 if selfA._toStore("ForecastState"):
1821 selfA.StoredVariables["ForecastState"].store( E2 )
1822 if selfA._toStore("ForecastCovariance"):
1823 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
1824 if selfA._toStore("BMA"):
1825 selfA.StoredVariables["BMA"].store( E2 - Xa )
1826 if selfA._toStore("InnovationAtCurrentState"):
1827 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1828 if selfA._toStore("SimulatedObservationAtCurrentState") \
1829 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1830 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1832 if selfA._parameters["StoreInternalVariables"] \
1833 or selfA._toStore("CostFunctionJ") \
1834 or selfA._toStore("CostFunctionJb") \
1835 or selfA._toStore("CostFunctionJo") \
1836 or selfA._toStore("CurrentOptimum") \
1837 or selfA._toStore("APosterioriCovariance"):
1838 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1839 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1840 J  = Jb + Jo
1841 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1842 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1843 selfA.StoredVariables["CostFunctionJ" ].store( J )
1845 if selfA._toStore("IndexOfOptimum") \
1846 or selfA._toStore("CurrentOptimum") \
1847 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1848 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1849 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1850 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1851 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1852 if selfA._toStore("IndexOfOptimum"):
1853 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1854 if selfA._toStore("CurrentOptimum"):
1855 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1856 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1857 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1858 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1859 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1860 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1861 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1862 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1863 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1864 if selfA._toStore("APosterioriCovariance"):
1865 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1866 if selfA._parameters["EstimationOf"] == "Parameters" \
1867 and J < previousJMinimum:
1868 previousJMinimum = J
1869 XaMin = Xa
1870 if selfA._toStore("APosterioriCovariance"):
1871 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1872 # ---> For the smoothers
1873 if selfA._toStore("CurrentEnsembleState"):
1874 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1876 # Additional final storage of the optimum, for parameter estimation
1877 # ----------------------------------------------------------------------
1878 if selfA._parameters["EstimationOf"] == "Parameters":
1879 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1880 selfA.StoredVariables["Analysis"].store( XaMin )
1881 if selfA._toStore("APosterioriCovariance"):
1882 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1883 if selfA._toStore("BMA"):
1884 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
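#
# Illustrative sketch (not part of the ADAO API, names hypothetical): the
# weight-space solve performed by the iterative loop above, written for a
# linear observation operator, in which case a single Newton step starting
# from w = 0 already reaches the minimum.
def _sketch_weight_space_solve( EaY, RI, dy ):
    "Solve (I + EaY' RI EaY) w = EaY' RI dy for the analysis weights w."
    __m = EaY.shape[1]
    mH  = numpy.identity(__m) + EaY.T @ (RI @ EaY)   # approximate Hessian
    gJ  = - EaY.T @ (RI @ dy)                        # gradient at w = 0
    return - numpy.linalg.solve(mH, gJ)              # Newton step from w = 0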
1888 # ==============================================================================
1889 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1897 # Non-linear operator for the outer loop
1898 Hm = HO["Direct"].appliedTo
1900 # Precomputation of the B and R inverses
1901 BI = B.getI()
1902 RI = R.getI()
1904 # Starting point of the optimization
1905 Xini = selfA._parameters["InitializationPoint"]
1907 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1908 Innovation = Y - HXb
1910 # Outer loop
1911 # ----------
1912 iOuter = 0
1913 J      = 1./mpr
1914 DeltaJ = 1./mpr
1915 Xr = Xini.reshape((-1,1))
1916 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1920 Ht = HO["Tangent"].asMatrix(Xr)
1921 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1923 # Definition of the cost function
1924 # ------------------------------
1925 def CostFunction(dx):
1926 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1927 if selfA._parameters["StoreInternalVariables"] or \
1928 selfA._toStore("CurrentState") or \
1929 selfA._toStore("CurrentOptimum"):
1930 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1931 _HdX = Ht * _dX
1932 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1933 _dInnovation = Innovation - _HdX
1934 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1935 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1936 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1937 if selfA._toStore("InnovationAtCurrentState"):
1938 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1940 Jb = float( 0.5 * _dX.T * BI * _dX )
1941 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1942 J  = Jb + Jo
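# Incremental form of the 3DVAR cost function, in the increment dx around
# the background Xb, with Innovation = Y - H(Xb) the outer-loop innovation
# and Ht the tangent of H at the current outer iterate:
#   J(dx) = 0.5 dx' B^-1 dx + 0.5 (Innovation - Ht dx)' R^-1 (Innovation - Ht dx)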
1944 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1945 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1946 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1947 selfA.StoredVariables["CostFunctionJ" ].store( J )
1948 if selfA._toStore("IndexOfOptimum") or \
1949 selfA._toStore("CurrentOptimum") or \
1950 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1951 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1952 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1953 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1954 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1955 if selfA._toStore("IndexOfOptimum"):
1956 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1957 if selfA._toStore("CurrentOptimum"):
1958 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1959 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1960 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1961 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1962 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1963 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1964 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1965 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1966 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1967 return J
1969 def GradientOfCostFunction(dx):
1970 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1971 _HdX = Ht * _dX
1972 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1973 _dInnovation = Innovation - _HdX
1974 GradJb = BI * _dX
1975 GradJo = - Ht.T @ (RI * _dInnovation)
1976 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1977 return GradJ
1979 # Minimization of the functional
1980 # --------------------------------
1981 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1983 if selfA._parameters["Minimizer"] == "LBFGSB":
1984 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1985 if "0.19" <= scipy.version.version <= "1.1.0":
1986 import lbfgsbhlt as optimiseur
1988 import scipy.optimize as optimiseur
1989 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1990 func = CostFunction,
1991 x0 = numpy.zeros(Xini.size),
1992 fprime = GradientOfCostFunction,
1994 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
1995 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1996 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1997 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1998 iprint = selfA._parameters["optiprint"],
1999 )
2000 nfeval = Informations['funcalls']
2001 rc = Informations['warnflag']
2002 elif selfA._parameters["Minimizer"] == "TNC":
2003 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2004 func = CostFunction,
2005 x0 = numpy.zeros(Xini.size),
2006 fprime = GradientOfCostFunction,
2008 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2009 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2010 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2011 ftol = selfA._parameters["CostDecrementTolerance"],
2012 messages = selfA._parameters["optmessages"],
2013 )
2014 elif selfA._parameters["Minimizer"] == "CG":
2015 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2016 f = CostFunction,
2017 x0 = numpy.zeros(Xini.size),
2018 fprime = GradientOfCostFunction,
2020 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2021 gtol = selfA._parameters["GradientNormTolerance"],
2022 disp = selfA._parameters["optdisp"],
2023 full_output = True,
2024 )
2025 elif selfA._parameters["Minimizer"] == "NCG":
2026 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2027 f = CostFunction,
2028 x0 = numpy.zeros(Xini.size),
2029 fprime = GradientOfCostFunction,
2031 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2032 avextol = selfA._parameters["CostDecrementTolerance"],
2033 disp = selfA._parameters["optdisp"],
2034 full_output = True,
2035 )
2036 elif selfA._parameters["Minimizer"] == "BFGS":
2037 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2038 f = CostFunction,
2039 x0 = numpy.zeros(Xini.size),
2040 fprime = GradientOfCostFunction,
2042 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2043 gtol = selfA._parameters["GradientNormTolerance"],
2044 disp = selfA._parameters["optdisp"],
2045 full_output = True,
2046 )
2047 else:
2048 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2050 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2051 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2053 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2054 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2055 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2056 else:
2057 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
2060 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2061 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2063 # Retrieval of the analysis
2064 # ----------------------
2065 Xa = Minimum
2067 selfA.StoredVariables["Analysis"].store( Xa )
2069 if selfA._toStore("OMA") or \
2070 selfA._toStore("SigmaObs2") or \
2071 selfA._toStore("SimulationQuantiles") or \
2072 selfA._toStore("SimulatedObservationAtOptimum"):
2073 if selfA._toStore("SimulatedObservationAtCurrentState"):
2074 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2075 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2076 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2077 else:
2078 HXa = Hm( Xa )
2080 # Computation of the analysis covariance
2081 # ---------------------------------
2082 if selfA._toStore("APosterioriCovariance") or \
2083 selfA._toStore("SimulationQuantiles") or \
2084 selfA._toStore("JacobianMatrixAtOptimum") or \
2085 selfA._toStore("KalmanGainAtOptimum"):
2086 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2087 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2088 if selfA._toStore("APosterioriCovariance") or \
2089 selfA._toStore("SimulationQuantiles") or \
2090 selfA._toStore("KalmanGainAtOptimum"):
2091 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2092 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2093 if selfA._toStore("APosterioriCovariance") or \
2094 selfA._toStore("SimulationQuantiles"):
2095 HessienneI = []
2096 nb = Xa.size
2097 for i in range(nb):
2098 _ee = numpy.matrix(numpy.zeros(nb)).T
2099 _ee[i] = 1.
2100 _HtEE = numpy.dot(HtM,_ee)
2101 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2102 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2103 HessienneI = numpy.matrix( HessienneI )
2104 A = HessienneI.I
2105 if min(A.shape) != max(A.shape):
2106 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2107 if (numpy.diag(A) < 0).any():
2108 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2109 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2110 try:
2111 L = numpy.linalg.cholesky( A )
2112 except:
2113 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2114 if selfA._toStore("APosterioriCovariance"):
2115 selfA.StoredVariables["APosterioriCovariance"].store( A )
2116 if selfA._toStore("JacobianMatrixAtOptimum"):
2117 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2118 if selfA._toStore("KalmanGainAtOptimum"):
2119 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2120 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2121 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
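# The two branches above are the algebraically equivalent "primal" and
# "dual" Kalman gain formulas, related by the Sherman-Morrison-Woodbury
# identity:
#   K = B H' (R + H B H')^-1 = (B^-1 + H' R^-1 H)^-1 H' R^-1
# The first inverts a matrix of the observation size, the second one of
# the state size, hence the switch on Y.size <= Xb.size.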
2123 # Additional computations and/or storage
2124 # ---------------------------------------
2125 if selfA._toStore("Innovation") or \
2126 selfA._toStore("SigmaObs2") or \
2127 selfA._toStore("MahalanobisConsistency") or \
2128 selfA._toStore("OMB"):
2129 d = Y - HXb
2130 if selfA._toStore("Innovation"):
2131 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2132 if selfA._toStore("BMA"):
2133 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2134 if selfA._toStore("OMA"):
2135 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2136 if selfA._toStore("OMB"):
2137 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2138 if selfA._toStore("SigmaObs2"):
2139 TraceR = R.trace(Y.size)
2140 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2141 if selfA._toStore("MahalanobisConsistency"):
2142 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2143 if selfA._toStore("SimulationQuantiles"):
2144 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2145 if selfA._toStore("SimulatedObservationAtBackground"):
2146 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2147 if selfA._toStore("SimulatedObservationAtOptimum"):
2148 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
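#
# Illustrative sketch (an assumption, not the ADAO implementation): the
# bound recentring that "RecentredBounds" is expected to perform when the
# optimization variable is the increment dx = x - Xb instead of x itself.
def _sketch_recentred_bounds( bounds, Xb ):
    "Shift [lower, upper] state bounds into increment space around Xb."
    if bounds is None: return None
    return [( None if lo is None else lo - xb,
              None if up is None else up - xb )
            for (lo, up), xb in zip(bounds, numpy.ravel(Xb))]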
2152 # ==============================================================================
2153 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2154 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2156 Maximum Likelihood Ensemble Filter
2158 if selfA._parameters["EstimationOf"] == "Parameters":
2159 selfA._parameters["StoreInternalVariables"] = True
2162 H = HO["Direct"].appliedControledFormTo
2164 if selfA._parameters["EstimationOf"] == "State":
2165 M = EM["Direct"].appliedControledFormTo
2167 if CM is not None and "Tangent" in CM and U is not None:
2168 Cm = CM["Tangent"].asMatrix(Xb)
2169 else:
2170 Cm = None
2172 # Observation duration and sizes
2173 if hasattr(Y,"stepnumber"):
2174 duration = Y.stepnumber()
2175 __p = numpy.cumprod(Y.shape())[-1]
2176 else:
2177 duration = 2
2178 __p = numpy.array(Y).size
2180 # Precomputation of the B and R inverses
2181 if selfA._parameters["StoreInternalVariables"] \
2182 or selfA._toStore("CostFunctionJ") \
2183 or selfA._toStore("CostFunctionJb") \
2184 or selfA._toStore("CostFunctionJo") \
2185 or selfA._toStore("CurrentOptimum") \
2186 or selfA._toStore("APosterioriCovariance"):
2187 BI = B.getI()
2188 RI = R.getI()
2190 __n = Xb.size
2191 __m = selfA._parameters["NumberOfMembers"]
2193 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2194 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2195 selfA.StoredVariables["Analysis"].store( Xb )
2196 if selfA._toStore("APosterioriCovariance"):
2197 if hasattr(B,"asfullmatrix"):
2198 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2199 else:
2200 selfA.StoredVariables["APosterioriCovariance"].store( B )
2201 selfA._setInternalState("seed", numpy.random.get_state())
2202 elif selfA._parameters["nextStep"]:
2203 Xn = selfA._getInternalState("Xn")
2205 previousJMinimum = numpy.finfo(float).max
2207 for step in range(duration-1):
2208 numpy.random.set_state(selfA._getInternalState("seed"))
2209 if hasattr(Y,"store"):
2210 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2211 else:
2212 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2215 if hasattr(U,"store") and len(U)>1:
2216 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2217 elif hasattr(U,"store") and len(U)==1:
2218 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2219 else:
2220 Un = numpy.asmatrix(numpy.ravel( U )).T
2224 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2225 Xn = CovarianceInflation( Xn,
2226 selfA._parameters["InflationType"],
2227 selfA._parameters["InflationFactor"],
2228 )
2230 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2231 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2233 returnSerieAsArrayMatrix = True )
2234 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2235 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
2236 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2237 Xn_predicted = Xn_predicted + Cm * Un
2238 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2239 # --- > By design, M = Id, Q = 0
2240 Xn_predicted = EMX = Xn
2242 #--------------------------
2243 if VariantM == "MLEF13":
2244 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2245 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2246 Ua = numpy.identity(__m)
2247 __j = 0
2248 Deltaw = 1
2249 if not BnotT:
2250 Ta = numpy.identity(__m)
2251 vw = numpy.zeros(__m)
2252 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2253 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2255 if BnotT:
2256 E1 = vx1 + _epsilon * EaX
2257 else:
2258 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2260 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2262 returnSerieAsArrayMatrix = True )
2263 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2265 if BnotT:
2266 EaY = (HE2 - vy2) / _epsilon
2267 else:
2268 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2270 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2271 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2272 Deltaw = - numpy.linalg.solve(mH,GradJ)
2277 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2282 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2284 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
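# Deterministic square-root update: the analysis ensemble is rebuilt as the
# optimal mean vx1 plus the forecast anomalies EaX rescaled by the
# transform matrix Ta (and the optional random rotation Ua), so that the
# analysis spread reflects the inverse Hessian mH^-1 without any explicit
# perturbation of the observations.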
2285 #--------------------------
2286 else:
2287 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2289 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2290 Xn = CovarianceInflation( Xn,
2291 selfA._parameters["InflationType"],
2292 selfA._parameters["InflationFactor"],
2293 )
2295 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2296 #--------------------------
2297 selfA._setInternalState("Xn", Xn)
2298 selfA._setInternalState("seed", numpy.random.get_state())
2299 #--------------------------
2301 if selfA._parameters["StoreInternalVariables"] \
2302 or selfA._toStore("CostFunctionJ") \
2303 or selfA._toStore("CostFunctionJb") \
2304 or selfA._toStore("CostFunctionJo") \
2305 or selfA._toStore("APosterioriCovariance") \
2306 or selfA._toStore("InnovationAtCurrentAnalysis") \
2307 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2308 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2309 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2310 _Innovation = Ynpu - _HXa
2312 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2313 # ---> with the analysis
2314 selfA.StoredVariables["Analysis"].store( Xa )
2315 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2316 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2317 if selfA._toStore("InnovationAtCurrentAnalysis"):
2318 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2319 # ---> with the current state
2320 if selfA._parameters["StoreInternalVariables"] \
2321 or selfA._toStore("CurrentState"):
2322 selfA.StoredVariables["CurrentState"].store( Xn )
2323 if selfA._toStore("ForecastState"):
2324 selfA.StoredVariables["ForecastState"].store( EMX )
2325 if selfA._toStore("ForecastCovariance"):
2326 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2327 if selfA._toStore("BMA"):
2328 selfA.StoredVariables["BMA"].store( EMX - Xa )
2329 if selfA._toStore("InnovationAtCurrentState"):
2330 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2331 if selfA._toStore("SimulatedObservationAtCurrentState") \
2332 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2333 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2335 if selfA._parameters["StoreInternalVariables"] \
2336 or selfA._toStore("CostFunctionJ") \
2337 or selfA._toStore("CostFunctionJb") \
2338 or selfA._toStore("CostFunctionJo") \
2339 or selfA._toStore("CurrentOptimum") \
2340 or selfA._toStore("APosterioriCovariance"):
2341 Jb = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
2342 Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
2343 J  = Jb + Jo
2344 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2345 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2346 selfA.StoredVariables["CostFunctionJ" ].store( J )
2348 if selfA._toStore("IndexOfOptimum") \
2349 or selfA._toStore("CurrentOptimum") \
2350 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2351 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2352 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2353 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2354 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2355 if selfA._toStore("IndexOfOptimum"):
2356 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2357 if selfA._toStore("CurrentOptimum"):
2358 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2359 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2360 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2361 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2362 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2363 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2364 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2365 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2366 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2367 if selfA._toStore("APosterioriCovariance"):
2368 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2369 if selfA._parameters["EstimationOf"] == "Parameters" \
2370 and J < previousJMinimum:
2371 previousJMinimum = J
2372 XaMin = Xa
2373 if selfA._toStore("APosterioriCovariance"):
2374 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2375 # ---> For the smoothers
2376 if selfA._toStore("CurrentEnsembleState"):
2377 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2379 # Additional final storage of the optimum, for parameter estimation
2380 # ----------------------------------------------------------------------
2381 if selfA._parameters["EstimationOf"] == "Parameters":
2382 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2383 selfA.StoredVariables["Analysis"].store( XaMin )
2384 if selfA._toStore("APosterioriCovariance"):
2385 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2386 if selfA._toStore("BMA"):
2387 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2391 # ==============================================================================
2403 Computer implementation of the MMQR algorithm, based on the publication:
2404 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2405 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2408 # Retrieval of the initial data and information
2409 # --------------------------------------------------
2410 variables = numpy.ravel( x0 )
2411 mesures = numpy.ravel( y )
2412 increment = sys.float_info[0] # i.e. the largest representable float
2413 p = variables.size
2414 n = mesures.size
2415 quantile = float(quantile)
2417 # Computation of the MM parameters
2418 # ---------------------------
2419 tn = float(toler) / n
2420 e0 = -tn / math.log(tn)
2421 epsilon = (e0-tn)/(1+math.log(e0))
2423 # Initialization computations
2424 # ------------------------
2425 residus = mesures - numpy.ravel( func( variables ) )
2426 poids = 1./(epsilon+numpy.abs(residus))
2427 veps = 1. - 2. * quantile - residus * poids
2428 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2429 iteration = 0
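# MM (majorize-minimize) machinery of Hunter & Lange (2000): the quantile
# check loss is majorized by a quadratic surrogate whose weights
# 1/(epsilon+|r|) are rebuilt from the current residuals r at each
# iteration, "epsilon" being the smoothing parameter computed above from
# the tolerance.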
2431 # Iterative search
2432 # -------------------
2433 while (increment > toler) and (iteration < maxfun) :
2434 iteration += 1
2436 Derivees = numpy.array(fprime(variables))
2437 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
2438 DeriveesT = Derivees.transpose()
2439 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2440 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
2441 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2443 variables = variables + step
2444 if bounds is not None:
2445 # Caution: avoid an infinite loop if an interval is too small
2446 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2447 step = step/2.
2448 variables = variables - step
2449 residus = mesures - numpy.ravel( func(variables) )
2450 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2452 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2453 step = step/2.
2454 variables = variables - step
2455 residus = mesures - numpy.ravel( func(variables) )
2456 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2458 increment = lastsurrogate-surrogate
2459 poids = 1./(epsilon+numpy.abs(residus))
2460 veps = 1. - 2. * quantile - residus * poids
2461 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2465 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2467 return variables, Ecart, [n,p,iteration,increment,0]
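#
# Illustrative usage sketch (hypothetical; the keyword names are taken from
# the variables used in the body of "mmqr" above, its exact signature being
# defined elsewhere): median regression (quantile 0.5) of a straight line
# y = a*x + b on noisy data.
def _sketch_mmqr_usage():
    "Fit a line at the median with mmqr; returns the two coefficients."
    x = numpy.linspace(0., 1., 50)
    y = 2. * x + 1. + 0.1 * numpy.random.standard_normal(50)
    func   = lambda coef: coef[0] * x + coef[1]                # model
    fprime = lambda coef: numpy.array([x, numpy.ones(50)]).T   # its Jacobian
    coefficients, ecart, infos = mmqr( func=func, x0=[1., 1.], fprime=fprime,
                                       y=y, quantile=0.5 )
    return coefficients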
2469 # ==============================================================================
2470 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2472 Multi-step and multi-method 3DVAR
2476 if selfA._parameters["EstimationOf"] == "State":
2477 M = EM["Direct"].appliedTo
2479 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2480 Xn = numpy.ravel(Xb).reshape((-1,1))
2481 selfA.StoredVariables["Analysis"].store( Xn )
2482 if selfA._toStore("APosterioriCovariance"):
2483 if hasattr(B,"asfullmatrix"):
2484 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2485 else:
2486 selfA.StoredVariables["APosterioriCovariance"].store( B )
2487 if selfA._toStore("ForecastState"):
2488 selfA.StoredVariables["ForecastState"].store( Xn )
2489 elif selfA._parameters["nextStep"]:
2490 Xn = selfA._getInternalState("Xn")
2491 else:
2492 Xn = numpy.ravel(Xb).reshape((-1,1))
2494 if hasattr(Y,"stepnumber"):
2495 duration = Y.stepnumber()
2496 else:
2497 duration = 2
2500 for step in range(duration-1):
2501 if hasattr(Y,"store"):
2502 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2503 else:
2504 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2506 if selfA._parameters["EstimationOf"] == "State": # Forecast
2507 Xn_predicted = M( Xn )
2508 if selfA._toStore("ForecastState"):
2509 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2510 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2511 # --- > By design, M = Id, Q = 0
2512 Xn_predicted = Xn
2513 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2515 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2517 Xn = selfA.StoredVariables["Analysis"][-1]
2518 #--------------------------
2519 selfA._setInternalState("Xn", Xn)
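# Driver pattern of this multi-step wrapper: at each observation slot, the
# previous analysis is propagated by the evolution model M to produce the
# forecast, which is then handed together with the new observation to
# "oneCycle" (any mono-step assimilation routine, here a 3DVAR variant) to
# produce the next analysis.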
2523 # ==============================================================================
2524 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2533 Hm = HO["Direct"].appliedTo
2535 # Possible use of a precomputed H(Xb) vector
2536 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2537 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2538 else:
2539 HXb = Hm( Xb )
2540 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2541 if Y.size != HXb.size:
2542 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2543 if max(Y.shape) != max(HXb.shape):
2544 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2546 if selfA._toStore("JacobianMatrixAtBackground"):
2547 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2548 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2549 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2551 Ht = HO["Tangent"].asMatrix(Xb)
2552 BHT = B * Ht.T
2553 HBHTpR = R + Ht * BHT
2554 Innovation = Y - HXb
2556 # Starting point of the optimization
2557 Xini = numpy.zeros(Xb.shape)
2559 # Definition of the cost function
2560 # ------------------------------
2561 def CostFunction(w):
2562 _W = numpy.asmatrix(numpy.ravel( w )).T
2563 if selfA._parameters["StoreInternalVariables"] or \
2564 selfA._toStore("CurrentState") or \
2565 selfA._toStore("CurrentOptimum"):
2566 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2567 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2568 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2569 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2570 if selfA._toStore("InnovationAtCurrentState"):
2571 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2573 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2574 Jo = float( - _W.T * Innovation )
2575 J  = Jb + Jo
2577 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2578 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2579 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2580 selfA.StoredVariables["CostFunctionJ" ].store( J )
2581 if selfA._toStore("IndexOfOptimum") or \
2582 selfA._toStore("CurrentOptimum") or \
2583 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2584 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2585 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2586 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2587 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2588 if selfA._toStore("IndexOfOptimum"):
2589 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2590 if selfA._toStore("CurrentOptimum"):
2591 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2592 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2593 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2594 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2595 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2596 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2597 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2598 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2599 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2600 return J
2602 def GradientOfCostFunction(w):
2603 _W = numpy.asmatrix(numpy.ravel( w )).T
2604 GradJb = HBHTpR * _W
2605 GradJo = - Innovation
2606 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2607 return GradJ
2609 # Minimization of the functional
2610 # --------------------------------
2611 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2613 if selfA._parameters["Minimizer"] == "LBFGSB":
2614 if "0.19" <= scipy.version.version <= "1.1.0":
2615 import lbfgsbhlt as optimiseur
2617 import scipy.optimize as optimiseur
2618 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2619 func = CostFunction,
2620 x0 = Xini,
2621 fprime = GradientOfCostFunction,
2623 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2624 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2625 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2626 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2627 iprint = selfA._parameters["optiprint"],
2628 )
2629 nfeval = Informations['funcalls']
2630 rc = Informations['warnflag']
2631 elif selfA._parameters["Minimizer"] == "TNC":
2632 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2633 func = CostFunction,
2634 x0 = Xini,
2635 fprime = GradientOfCostFunction,
2637 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
2638 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2639 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2640 ftol = selfA._parameters["CostDecrementTolerance"],
2641 messages = selfA._parameters["optmessages"],
2642 )
2643 elif selfA._parameters["Minimizer"] == "CG":
2644 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2645 f = CostFunction,
2646 x0 = Xini,
2647 fprime = GradientOfCostFunction,
2649 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2650 gtol = selfA._parameters["GradientNormTolerance"],
2651 disp = selfA._parameters["optdisp"],
2652 full_output = True,
2653 )
2654 elif selfA._parameters["Minimizer"] == "NCG":
2655 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2656 f = CostFunction,
2657 x0 = Xini,
2658 fprime = GradientOfCostFunction,
2660 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2661 avextol = selfA._parameters["CostDecrementTolerance"],
2662 disp = selfA._parameters["optdisp"],
2663 full_output = True,
2664 )
2665 elif selfA._parameters["Minimizer"] == "BFGS":
2666 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2667 f = CostFunction,
2668 x0 = Xini,
2669 fprime = GradientOfCostFunction,
2671 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2672 gtol = selfA._parameters["GradientNormTolerance"],
2673 disp = selfA._parameters["optdisp"],
2674 full_output = True,
2675 )
2676 else:
2677 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2679 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2680 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2682 # Workaround for a TNC bug on the returned Minimum
2683 # ----------------------------------------------------------------
2684 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2685 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2686 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2687 else:
2688 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2690 # Retrieval of the analysis
2691 # ----------------------
2692 Xa = Minimum
2694 selfA.StoredVariables["Analysis"].store( Xa )
2696 if selfA._toStore("OMA") or \
2697 selfA._toStore("SigmaObs2") or \
2698 selfA._toStore("SimulationQuantiles") or \
2699 selfA._toStore("SimulatedObservationAtOptimum"):
2700 if selfA._toStore("SimulatedObservationAtCurrentState"):
2701 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2702 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2703 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2704 else:
2705 HXa = Hm( Xa )
2707 # Computation of the analysis covariance
2708 # ---------------------------------
2709 if selfA._toStore("APosterioriCovariance") or \
2710 selfA._toStore("SimulationQuantiles") or \
2711 selfA._toStore("JacobianMatrixAtOptimum") or \
2712 selfA._toStore("KalmanGainAtOptimum"):
2713 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2714 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2715 if selfA._toStore("APosterioriCovariance") or \
2716 selfA._toStore("SimulationQuantiles") or \
2717 selfA._toStore("KalmanGainAtOptimum"):
2718 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2719 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2720 if selfA._toStore("APosterioriCovariance") or \
2721 selfA._toStore("SimulationQuantiles"):
2722 BI = B.getI()
2723 RI = R.getI()
2724 HessienneI = []
2725 nb = Xa.size
2726 for i in range(nb):
2727 _ee = numpy.matrix(numpy.zeros(nb)).T
2728 _ee[i] = 1.
2729 _HtEE = numpy.dot(HtM,_ee)
2730 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2731 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2732 HessienneI = numpy.matrix( HessienneI )
2733 A = HessienneI.I
2734 if min(A.shape) != max(A.shape):
2735 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2736 if (numpy.diag(A) < 0).any():
2737 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2738 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2739 try:
2740 L = numpy.linalg.cholesky( A )
2741 except:
2742 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2743 if selfA._toStore("APosterioriCovariance"):
2744 selfA.StoredVariables["APosterioriCovariance"].store( A )
2745 if selfA._toStore("JacobianMatrixAtOptimum"):
2746 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2747 if selfA._toStore("KalmanGainAtOptimum"):
2748 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2749 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2750 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2752 # Additional computations and/or storage
2753 # ---------------------------------------
2754 if selfA._toStore("Innovation") or \
2755 selfA._toStore("SigmaObs2") or \
2756 selfA._toStore("MahalanobisConsistency") or \
2757 selfA._toStore("OMB"):
2758 d = Y - HXb
2759 if selfA._toStore("Innovation"):
2760 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2761 if selfA._toStore("BMA"):
2762 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2763 if selfA._toStore("OMA"):
2764 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2765 if selfA._toStore("OMB"):
2766 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2767 if selfA._toStore("SigmaObs2"):
2768 TraceR = R.trace(Y.size)
2769 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2770 if selfA._toStore("MahalanobisConsistency"):
2771 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2772 if selfA._toStore("SimulationQuantiles"):
2773 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2774 if selfA._toStore("SimulatedObservationAtBackground"):
2775 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2776 if selfA._toStore("SimulatedObservationAtOptimum"):
2777 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2781 # ==============================================================================
2782 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
2786 if selfA._parameters["EstimationOf"] == "Parameters":
2787 selfA._parameters["StoreInternalVariables"] = True
2790 H = HO["Direct"].appliedControledFormTo
2792 if selfA._parameters["EstimationOf"] == "State":
2793 M = EM["Direct"].appliedControledFormTo
2795 if CM is not None and "Tangent" in CM and U is not None:
2796 Cm = CM["Tangent"].asMatrix(Xb)
2797 else:
2798 Cm = None
2800 # Observation duration and sizes
2801 if hasattr(Y,"stepnumber"):
2802 duration = Y.stepnumber()
2803 __p = numpy.cumprod(Y.shape())[-1]
2804 else:
2805 duration = 2
2806 __p = numpy.array(Y).size
2808 # Precomputation of the B and R inverses
2809 if selfA._parameters["StoreInternalVariables"] \
2810 or selfA._toStore("CostFunctionJ") \
2811 or selfA._toStore("CostFunctionJb") \
2812 or selfA._toStore("CostFunctionJo") \
2813 or selfA._toStore("CurrentOptimum") \
2814 or selfA._toStore("APosterioriCovariance"):
2815 BI = B.getI()
2816 RI = R.getI()
2818 __n = Xb.size
2819 __m = selfA._parameters["NumberOfMembers"]
2821 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2822 else:                         Rn = R
2824 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2825 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2826 else:                         Pn = B
2827 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2828 selfA.StoredVariables["Analysis"].store( Xb )
2829 if selfA._toStore("APosterioriCovariance"):
2830 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2831 selfA._setInternalState("seed", numpy.random.get_state())
2832 elif selfA._parameters["nextStep"]:
2833 Xn = selfA._getInternalState("Xn")
2835 previousJMinimum = numpy.finfo(float).max
2837 for step in range(duration-1):
2838 numpy.random.set_state(selfA._getInternalState("seed"))
2839 if hasattr(Y,"store"):
2840 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2841 else:
2842 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2845 if hasattr(U,"store") and len(U)>1:
2846 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2847 elif hasattr(U,"store") and len(U)==1:
2848 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2849 else:
2850 Un = numpy.asmatrix(numpy.ravel( U )).T
2854 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2855 Xn = CovarianceInflation( Xn,
2856 selfA._parameters["InflationType"],
2857 selfA._parameters["InflationFactor"],
2858 )
2860 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2861 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2863 returnSerieAsArrayMatrix = True )
2864 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2865 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2867 returnSerieAsArrayMatrix = True )
2868 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
2869 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2870 Xn_predicted = Xn_predicted + Cm * Un
2871 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2872 # --- > By design, M = Id, Q = 0
2873 Xn_predicted = EMX = Xn
2874 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2876 returnSerieAsArrayMatrix = True )
2878 # Mean of forecast and observation of forecast
2879 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2880 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2882 #--------------------------
2883 if VariantM == "KalmanFilterFormula05":
2884 PfHT, HPfHT = 0., 0.
2885 for i in range(__m):
2886 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2887 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2888 PfHT += Exfi * Eyfi.T
2889 HPfHT += Eyfi * Eyfi.T
2890 PfHT = (1./(__m-1)) * PfHT
2891 HPfHT = (1./(__m-1)) * HPfHT
2892 Kn = PfHT * ( R + HPfHT ).I
2895 for i in range(__m):
2896 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2897 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2898 #--------------------------
2899 elif VariantM == "KalmanFilterFormula16":
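# This variant draws an ensemble of perturbed observations EpY with covariance Rn;
# recentring on its mean EpYm below avoids biasing the analysis with the sampling mean.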
2900 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2901 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2903 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2904 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
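# The same gain written with normalized anomalies: Kn = EaX * EaY^T * (EaY * EaY^T)^-1.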
2906 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2908 for i in range(__m):
2909 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2910 #--------------------------
2911 else:
2912 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2914 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2915 Xn = CovarianceInflation( Xn,
2916 selfA._parameters["InflationType"],
2917 selfA._parameters["InflationFactor"],
2918 )
2920 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2921 #--------------------------
2922 selfA._setInternalState("Xn", Xn)
2923 selfA._setInternalState("seed", numpy.random.get_state())
2924 #--------------------------
2926 if selfA._parameters["StoreInternalVariables"] \
2927 or selfA._toStore("CostFunctionJ") \
2928 or selfA._toStore("CostFunctionJb") \
2929 or selfA._toStore("CostFunctionJo") \
2930 or selfA._toStore("APosterioriCovariance") \
2931 or selfA._toStore("InnovationAtCurrentAnalysis") \
2932 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2933 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2934 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2935 _Innovation = Ynpu - _HXa
2937 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2938 # ---> with analysis
2939 selfA.StoredVariables["Analysis"].store( Xa )
2940 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2941 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2942 if selfA._toStore("InnovationAtCurrentAnalysis"):
2943 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2944 # ---> with current state
2945 if selfA._parameters["StoreInternalVariables"] \
2946 or selfA._toStore("CurrentState"):
2947 selfA.StoredVariables["CurrentState"].store( Xn )
2948 if selfA._toStore("ForecastState"):
2949 selfA.StoredVariables["ForecastState"].store( EMX )
2950 if selfA._toStore("ForecastCovariance"):
2951 selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2952 if selfA._toStore("BMA"):
2953 selfA.StoredVariables["BMA"].store( EMX - Xa )
2954 if selfA._toStore("InnovationAtCurrentState"):
2955 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2956 if selfA._toStore("SimulatedObservationAtCurrentState") \
2957 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2958 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2960 if selfA._parameters["StoreInternalVariables"] \
2961 or selfA._toStore("CostFunctionJ") \
2962 or selfA._toStore("CostFunctionJb") \
2963 or selfA._toStore("CostFunctionJo") \
2964 or selfA._toStore("CurrentOptimum") \
2965 or selfA._toStore("APosterioriCovariance"):
2966 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2967 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2968 J = Jb + Jo
2969 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2970 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2971 selfA.StoredVariables["CostFunctionJ" ].store( J )
2973 if selfA._toStore("IndexOfOptimum") \
2974 or selfA._toStore("CurrentOptimum") \
2975 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2976 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2977 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2978 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2979 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2980 if selfA._toStore("IndexOfOptimum"):
2981 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2982 if selfA._toStore("CurrentOptimum"):
2983 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2984 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2985 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2986 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2987 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2988 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2989 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2990 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2991 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2992 if selfA._toStore("APosterioriCovariance"):
2993 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2994 if selfA._parameters["EstimationOf"] == "Parameters" \
2995 and J < previousJMinimum:
2996 previousJMinimum = J
2997 XaMin = Xa
2998 if selfA._toStore("APosterioriCovariance"):
2999 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3000 # ---> For the smoothers
3001 if selfA._toStore("CurrentEnsembleState"):
3002 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3004 # Additional final storage of the optimum, for parameter estimation
3005 # ----------------------------------------------------------------------
3006 if selfA._parameters["EstimationOf"] == "Parameters":
3007 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3008 selfA.StoredVariables["Analysis"].store( XaMin )
3009 if selfA._toStore("APosterioriCovariance"):
3010 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3011 if selfA._toStore("BMA"):
3012 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3014 return 0
3016 # ==============================================================================
3017 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3026 Hm = HO["Direct"].appliedTo
3027 Ha = HO["Adjoint"].appliedInXTo
3029 # Optional use of a precomputed H(Xb) vector
3030 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3031 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3032 else:
3033 HXb = Hm( Xb )
3034 HXb = HXb.reshape((-1,1))
3035 if Y.size != HXb.size:
3036 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3037 if max(Y.shape) != max(HXb.shape):
3038 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3040 if selfA._toStore("JacobianMatrixAtBackground"):
3041 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3042 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3043 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3045 # Precompute the inversions of B and R
3046 BI = B.getI()
3047 RI = R.getI()
3049 # Starting point of the optimization
3050 Xini = selfA._parameters["InitializationPoint"]
3052 # Definition of the cost function
3053 # ------------------------------
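# The functional evaluated below is the classical 3DVAR cost:
# J(x) = 1/2 (x-Xb)^T B^-1 (x-Xb) + 1/2 (Y-H(x))^T R^-1 (Y-H(x)) = Jb + Jo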
3054 def CostFunction(x):
3055 _X = numpy.ravel( x ).reshape((-1,1))
3056 if selfA._parameters["StoreInternalVariables"] or \
3057 selfA._toStore("CurrentState") or \
3058 selfA._toStore("CurrentOptimum"):
3059 selfA.StoredVariables["CurrentState"].store( _X )
3060 _HX = Hm( _X ).reshape((-1,1))
3061 _Innovation = Y - _HX
3062 if selfA._toStore("SimulatedObservationAtCurrentState") or \
3063 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3064 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3065 if selfA._toStore("InnovationAtCurrentState"):
3066 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3068 Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3069 Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3070 J = Jb + Jo
3072 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3073 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3074 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3075 selfA.StoredVariables["CostFunctionJ" ].store( J )
3076 if selfA._toStore("IndexOfOptimum") or \
3077 selfA._toStore("CurrentOptimum") or \
3078 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3079 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3080 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3081 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3082 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3083 if selfA._toStore("IndexOfOptimum"):
3084 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3085 if selfA._toStore("CurrentOptimum"):
3086 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3087 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3088 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3089 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3090 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3091 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3092 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3093 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3094 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3095 return J
3097 def GradientOfCostFunction(x):
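# Exact gradient of J, using the adjoint of the observation operator:
# grad J(x) = B^-1 (x-Xb) - Ha( R^-1 (Y-H(x)) )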
3098 _X = numpy.ravel( x ).reshape((-1,1))
3099 _HX = Hm( _X )
3100 _HX = numpy.ravel( _HX ).reshape((-1,1))
3101 GradJb = BI * (_X - Xb)
3102 GradJo = - Ha( (_X, RI * (Y - _HX)) )
3103 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3104 return GradJ
3106 # Minimization of the functional
3107 # --------------------------------
3108 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3110 if selfA._parameters["Minimizer"] == "LBFGSB":
3111 if "0.19" <= scipy.version.version <= "1.1.0":
3112 import lbfgsbhlt as optimiseur
3113 else:
3114 import scipy.optimize as optimiseur
3115 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3116 func = CostFunction,
3117 x0 = Xini,
3118 fprime = GradientOfCostFunction,
3119 args = (),
3120 bounds = selfA._parameters["Bounds"],
3121 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3122 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3123 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3124 iprint = selfA._parameters["optiprint"],
3125 )
3126 nfeval = Informations['funcalls']
3127 rc = Informations['warnflag']
3128 elif selfA._parameters["Minimizer"] == "TNC":
3129 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3130 func = CostFunction,
3131 x0 = Xini,
3132 fprime = GradientOfCostFunction,
3133 args = (),
3134 bounds = selfA._parameters["Bounds"],
3135 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3136 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3137 ftol = selfA._parameters["CostDecrementTolerance"],
3138 messages = selfA._parameters["optmessages"],
3139 )
3140 elif selfA._parameters["Minimizer"] == "CG":
3141 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3142 f = CostFunction,
3143 x0 = Xini,
3144 fprime = GradientOfCostFunction,
3145 args = (),
3146 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3147 gtol = selfA._parameters["GradientNormTolerance"],
3148 disp = selfA._parameters["optdisp"],
3149 full_output = True,
3150 )
3151 elif selfA._parameters["Minimizer"] == "NCG":
3152 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3153 f = CostFunction,
3154 x0 = Xini,
3155 fprime = GradientOfCostFunction,
3156 args = (),
3157 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3158 avextol = selfA._parameters["CostDecrementTolerance"],
3159 disp = selfA._parameters["optdisp"],
3160 full_output = True,
3161 )
3162 elif selfA._parameters["Minimizer"] == "BFGS":
3163 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3164 f = CostFunction,
3165 x0 = Xini,
3166 fprime = GradientOfCostFunction,
3167 args = (),
3168 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3169 gtol = selfA._parameters["GradientNormTolerance"],
3170 disp = selfA._parameters["optdisp"],
3171 full_output = True,
3172 )
3173 else:
3174 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3176 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3177 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3179 # Workaround for a TNC bug affecting the returned Minimum
3180 # ----------------------------------------------------------------
3181 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3182 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3184 Xa = Minimum
3185 #--------------------------
3187 selfA.StoredVariables["Analysis"].store( Xa )
3189 if selfA._toStore("OMA") or \
3190 selfA._toStore("SigmaObs2") or \
3191 selfA._toStore("SimulationQuantiles") or \
3192 selfA._toStore("SimulatedObservationAtOptimum"):
3193 if selfA._toStore("SimulatedObservationAtCurrentState"):
3194 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3195 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3196 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3197 else:
3198 HXa = Hm( Xa )
3200 if selfA._toStore("APosterioriCovariance") or \
3201 selfA._toStore("SimulationQuantiles") or \
3202 selfA._toStore("JacobianMatrixAtOptimum") or \
3203 selfA._toStore("KalmanGainAtOptimum"):
3204 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3205 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3206 if selfA._toStore("APosterioriCovariance") or \
3207 selfA._toStore("SimulationQuantiles") or \
3208 selfA._toStore("KalmanGainAtOptimum"):
3209 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3210 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3211 if selfA._toStore("APosterioriCovariance") or \
3212 selfA._toStore("SimulationQuantiles"):
3213 HessienneI = []
3214 nb = Xa.size
3215 for i in range(nb):
3216 _ee = numpy.zeros(nb)
3217 _ee[i] = 1.
3218 _HtEE = numpy.dot(HtM,_ee)
3219 HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
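# Each column appended to HessienneI is (B^-1 + Ha*R^-1*Ht) applied to a canonical
# basis vector, so the inverse of the assembled Hessian is the a posteriori covariance A.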
3220 A = numpy.linalg.inv(numpy.array( HessienneI ))
3221 if min(A.shape) != max(A.shape):
3222 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
3223 if (numpy.diag(A) < 0).any():
3224 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
3225 if logging.getLogger().level < logging.WARNING: # the check is only performed in debug mode
3226 try:
3227 L = numpy.linalg.cholesky( A )
3228 except:
3229 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
3230 if selfA._toStore("APosterioriCovariance"):
3231 selfA.StoredVariables["APosterioriCovariance"].store( A )
3232 if selfA._toStore("JacobianMatrixAtOptimum"):
3233 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3234 if selfA._toStore("KalmanGainAtOptimum"):
3235 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3236 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3237 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3239 if selfA._toStore("Innovation") or \
3240 selfA._toStore("SigmaObs2") or \
3241 selfA._toStore("MahalanobisConsistency") or \
3242 selfA._toStore("OMB"):
3244 if selfA._toStore("Innovation"):
3245 selfA.StoredVariables["Innovation"].store( d )
3246 if selfA._toStore("BMA"):
3247 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3248 if selfA._toStore("OMA"):
3249 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3250 if selfA._toStore("OMB"):
3251 selfA.StoredVariables["OMB"].store( d )
3252 if selfA._toStore("SigmaObs2"):
3253 TraceR = R.trace(Y.size)
3254 selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3255 if selfA._toStore("MahalanobisConsistency"):
3256 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3257 if selfA._toStore("SimulationQuantiles"):
3258 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3259 if selfA._toStore("SimulatedObservationAtBackground"):
3260 selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3261 if selfA._toStore("SimulatedObservationAtOptimum"):
3262 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3264 return 0
3266 # ==============================================================================
3267 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3276 Hm = HO["Direct"].appliedControledFormTo
3277 Mm = EM["Direct"].appliedControledFormTo
3279 if CM is not None and "Tangent" in CM and U is not None:
3280 Cm = CM["Tangent"].asMatrix(Xb)
3281 else:
3282 Cm = None
3284 def Un(_step):
3285 if U is not None:
3286 if hasattr(U,"store") and 1<=_step<len(U) :
3287 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
3288 elif hasattr(U,"store") and len(U)==1:
3289 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3290 else:
3291 _Un = numpy.asmatrix(numpy.ravel( U )).T
3292 else:
3293 _Un = None
3294 return _Un
3295 def CmUn(_xn,_un):
3296 if Cm is not None and _un is not None: # Warning: if Cm is also included in M, this is a duplicate!
3297 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3298 _CmUn = _Cm * _un
3299 else:
3300 _CmUn = 0.
3301 return _CmUn
3303 # Remark: the observations are used starting from time step number 1,
3304 # and are stored in Yo as ordered by these indices. Step 0 is therefore
3305 # not used, since the first assimilation step begins with the
3306 # observation of step 1.
3308 # Number of steps equal to the number of observation steps
3309 if hasattr(Y,"stepnumber"):
3310 duration = Y.stepnumber()
3311 else:
3312 duration = 2
3314 # Precompute the inversions of B and R
3315 BI = B.getI()
3316 RI = R.getI()
3318 # Starting point of the optimization
3319 Xini = selfA._parameters["InitializationPoint"]
3321 # Definition of the cost function
3322 # ------------------------------
3323 selfA.DirectCalculation = [None,] # Step 0 is not observed
3324 selfA.DirectInnovation = [None,] # Step 0 is not observed
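# The forward sweep of CostFunction stores the trajectory and the innovations in
# these two lists, so that GradientOfCostFunction can run the adjoint recursion
# backward in time without recomputing the evolution model.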
3325 def CostFunction(x):
3326 _X = numpy.asmatrix(numpy.ravel( x )).T
3327 if selfA._parameters["StoreInternalVariables"] or \
3328 selfA._toStore("CurrentState") or \
3329 selfA._toStore("CurrentOptimum"):
3330 selfA.StoredVariables["CurrentState"].store( _X )
3331 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
3332 selfA.DirectCalculation = [None,]
3333 selfA.DirectInnovation = [None,]
3334 Jo = 0.
3335 _Xn = _X
3336 for step in range(0,duration-1):
3337 if hasattr(Y,"store"):
3338 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
3339 else:
3340 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
3342 _Un = Un(step)
3344 if selfA._parameters["EstimationOf"] == "State":
3345 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
3346 elif selfA._parameters["EstimationOf"] == "Parameters":
3347 pass
3349 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3350 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3352 # Difference-to-observations step
3353 if selfA._parameters["EstimationOf"] == "State":
3354 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
3355 elif selfA._parameters["EstimationOf"] == "Parameters":
3356 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
3358 # Storage of the state
3359 selfA.DirectCalculation.append( _Xn )
3360 selfA.DirectInnovation.append( _YmHMX )
3362 # Add to the observation functional
3363 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
3365 J = Jb + Jo
3366 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3367 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3368 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3369 selfA.StoredVariables["CostFunctionJ" ].store( J )
3370 if selfA._toStore("IndexOfOptimum") or \
3371 selfA._toStore("CurrentOptimum") or \
3372 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3373 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3374 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3375 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3376 if selfA._toStore("IndexOfOptimum"):
3377 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3378 if selfA._toStore("CurrentOptimum"):
3379 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3380 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3381 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3382 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3383 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3384 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3385 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3386 return J
3388 def GradientOfCostFunction(x):
3389 _X = numpy.asmatrix(numpy.ravel( x )).T
3390 GradJb = BI * (_X - Xb)
3391 GradJo = 0.
3392 for step in range(duration-1,0,-1):
3393 # Retrieve the last stored evolution state
3394 _Xn = selfA.DirectCalculation.pop()
3395 # Retrieve the last stored innovation
3396 _YmHMX = selfA.DirectInnovation.pop()
3397 # Computation of the adjoints
3398 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3399 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3400 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3401 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3402 # Gradient computation by the adjoint state method
3403 GradJo = GradJo + Ha * RI * _YmHMX # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3404 GradJo = Ma * GradJo # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
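# At the end of the backward loop, GradJo holds the derivative of Jo with respect
# to the initial state; the minus sign below comes from the innovation convention
# _YmHMX = Y - H(M(X)).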
3405 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3406 return GradJ
3408 # Minimization of the functional
3409 # --------------------------------
3410 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3412 if selfA._parameters["Minimizer"] == "LBFGSB":
3413 if "0.19" <= scipy.version.version <= "1.1.0":
3414 import lbfgsbhlt as optimiseur
3415 else:
3416 import scipy.optimize as optimiseur
3417 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3418 func = CostFunction,
3419 x0 = Xini,
3420 fprime = GradientOfCostFunction,
3421 args = (),
3422 bounds = selfA._parameters["Bounds"],
3423 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
3424 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
3425 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3426 iprint = selfA._parameters["optiprint"],
3427 )
3428 nfeval = Informations['funcalls']
3429 rc = Informations['warnflag']
3430 elif selfA._parameters["Minimizer"] == "TNC":
3431 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3432 func = CostFunction,
3433 x0 = Xini,
3434 fprime = GradientOfCostFunction,
3435 args = (),
3436 bounds = selfA._parameters["Bounds"],
3437 maxfun = selfA._parameters["MaximumNumberOfSteps"],
3438 pgtol = selfA._parameters["ProjectedGradientTolerance"],
3439 ftol = selfA._parameters["CostDecrementTolerance"],
3440 messages = selfA._parameters["optmessages"],
3441 )
3442 elif selfA._parameters["Minimizer"] == "CG":
3443 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3444 f = CostFunction,
3445 x0 = Xini,
3446 fprime = GradientOfCostFunction,
3447 args = (),
3448 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3449 gtol = selfA._parameters["GradientNormTolerance"],
3450 disp = selfA._parameters["optdisp"],
3451 full_output = True,
3452 )
3453 elif selfA._parameters["Minimizer"] == "NCG":
3454 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3455 f = CostFunction,
3456 x0 = Xini,
3457 fprime = GradientOfCostFunction,
3458 args = (),
3459 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3460 avextol = selfA._parameters["CostDecrementTolerance"],
3461 disp = selfA._parameters["optdisp"],
3462 full_output = True,
3463 )
3464 elif selfA._parameters["Minimizer"] == "BFGS":
3465 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3466 f = CostFunction,
3467 x0 = Xini,
3468 fprime = GradientOfCostFunction,
3469 args = (),
3470 maxiter = selfA._parameters["MaximumNumberOfSteps"],
3471 gtol = selfA._parameters["GradientNormTolerance"],
3472 disp = selfA._parameters["optdisp"],
3473 full_output = True,
3474 )
3475 else:
3476 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3478 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3479 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3481 # Workaround for a TNC bug affecting the returned Minimum
3482 # ----------------------------------------------------------------
3483 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3484 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3486 # Obtaining the analysis
3487 # ----------------------
3488 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
3490 selfA.StoredVariables["Analysis"].store( Xa )
3492 # Additional computations and/or storage
3493 # ---------------------------------------
3494 if selfA._toStore("BMA"):
3495 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3497 return 0
3499 # ==============================================================================
3500 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3501 """
3502 Standard Kalman Filter
3503 """
3504 if selfA._parameters["EstimationOf"] == "Parameters":
3505 selfA._parameters["StoreInternalVariables"] = True
3509 Ht = HO["Tangent"].asMatrix(Xb)
3510 Ha = HO["Adjoint"].asMatrix(Xb)
3512 if selfA._parameters["EstimationOf"] == "State":
3513 Mt = EM["Tangent"].asMatrix(Xb)
3514 Ma = EM["Adjoint"].asMatrix(Xb)
3516 if CM is not None and "Tangent" in CM and U is not None:
3517 Cm = CM["Tangent"].asMatrix(Xb)
3518 else:
3519 Cm = None
3521 # Observation duration and sizes
3522 if hasattr(Y,"stepnumber"):
3523 duration = Y.stepnumber()
3524 __p = numpy.cumprod(Y.shape())[-1]
3525 else:
3526 duration = 2
3527 __p = numpy.array(Y).size
3529 # Precompute the inversions of B and R
3530 if selfA._parameters["StoreInternalVariables"] \
3531 or selfA._toStore("CostFunctionJ") \
3532 or selfA._toStore("CostFunctionJb") \
3533 or selfA._toStore("CostFunctionJo") \
3534 or selfA._toStore("CurrentOptimum") \
3535 or selfA._toStore("APosterioriCovariance"):
3536 BI = B.getI()
3537 RI = R.getI()
3539 __n = Xb.size
3541 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3542 Xn = Xb
3543 Pn = B
3544 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3545 selfA.StoredVariables["Analysis"].store( Xb )
3546 if selfA._toStore("APosterioriCovariance"):
3547 if hasattr(B,"asfullmatrix"):
3548 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3550 selfA.StoredVariables["APosterioriCovariance"].store( B )
3551 selfA._setInternalState("seed", numpy.random.get_state())
3552 elif selfA._parameters["nextStep"]:
3553 Xn = selfA._getInternalState("Xn")
3554 Pn = selfA._getInternalState("Pn")
3556 if selfA._parameters["EstimationOf"] == "Parameters":
3557 XaMin = Xn
3558 previousJMinimum = numpy.finfo(float).max
3560 for step in range(duration-1):
3561 if hasattr(Y,"store"):
3562 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3563 else:
3564 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3566 if U is not None:
3567 if hasattr(U,"store") and len(U)>1:
3568 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
3569 elif hasattr(U,"store") and len(U)==1:
3570 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
3571 else:
3572 Un = numpy.asmatrix(numpy.ravel( U )).T
3573 else:
3574 Un = None
3576 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3577 Xn_predicted = Mt * Xn
3578 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplicate!
3579 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3580 Xn_predicted = Xn_predicted + Cm * Un
3581 Pn_predicted = Q + Mt * Pn * Ma
3582 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3583 # --- > By principle, M = Id, Q = 0
3584 Xn_predicted = Xn
3585 Pn_predicted = Pn
3587 if selfA._parameters["EstimationOf"] == "State":
3588 HX_predicted = Ht * Xn_predicted
3589 _Innovation = Ynpu - HX_predicted
3590 elif selfA._parameters["EstimationOf"] == "Parameters":
3591 HX_predicted = Ht * Xn_predicted
3592 _Innovation = Ynpu - HX_predicted
3593 if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this is a duplicate!
3594 _Innovation = _Innovation - Cm * Un
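# Standard Kalman update: gain Kn = Pf*Ha*(R + Ht*Pf*Ha)^-1, correction of the state
# by the innovation, and reduction of the forecast covariance.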
3596 Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3597 Xn = Xn_predicted + Kn * _Innovation
3598 Pn = Pn_predicted - Kn * Ht * Pn_predicted
3599 Xa = Xn # Pointer
3601 #--------------------------
3602 selfA._setInternalState("Xn", Xn)
3603 selfA._setInternalState("Pn", Pn)
3604 #--------------------------
3606 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3607 # ---> with analysis
3608 selfA.StoredVariables["Analysis"].store( Xa )
3609 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3610 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3611 if selfA._toStore("InnovationAtCurrentAnalysis"):
3612 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3613 # ---> with current state
3614 if selfA._parameters["StoreInternalVariables"] \
3615 or selfA._toStore("CurrentState"):
3616 selfA.StoredVariables["CurrentState"].store( Xn )
3617 if selfA._toStore("ForecastState"):
3618 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3619 if selfA._toStore("ForecastCovariance"):
3620 selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3621 if selfA._toStore("BMA"):
3622 selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3623 if selfA._toStore("InnovationAtCurrentState"):
3624 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3625 if selfA._toStore("SimulatedObservationAtCurrentState") \
3626 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3627 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3629 if selfA._parameters["StoreInternalVariables"] \
3630 or selfA._toStore("CostFunctionJ") \
3631 or selfA._toStore("CostFunctionJb") \
3632 or selfA._toStore("CostFunctionJo") \
3633 or selfA._toStore("CurrentOptimum") \
3634 or selfA._toStore("APosterioriCovariance"):
3635 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3636 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3637 J = Jb + Jo
3638 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3639 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3640 selfA.StoredVariables["CostFunctionJ" ].store( J )
3642 if selfA._toStore("IndexOfOptimum") \
3643 or selfA._toStore("CurrentOptimum") \
3644 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3645 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3646 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3647 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3648 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3649 if selfA._toStore("IndexOfOptimum"):
3650 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3651 if selfA._toStore("CurrentOptimum"):
3652 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3653 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3654 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3655 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3656 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3657 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3658 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3659 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3660 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3661 if selfA._toStore("APosterioriCovariance"):
3662 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3663 if selfA._parameters["EstimationOf"] == "Parameters" \
3664 and J < previousJMinimum:
3665 previousJMinimum = J
3666 XaMin = Xa
3667 if selfA._toStore("APosterioriCovariance"):
3668 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3670 # Additional final storage of the optimum, for parameter estimation
3671 # ----------------------------------------------------------------------
3672 if selfA._parameters["EstimationOf"] == "Parameters":
3673 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3674 selfA.StoredVariables["Analysis"].store( XaMin )
3675 if selfA._toStore("APosterioriCovariance"):
3676 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3677 if selfA._toStore("BMA"):
3678 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3680 return 0
3682 # ==============================================================================
3683 def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3684 """
3685 Constrained Unscented Kalman Filter
3686 """
3687 if selfA._parameters["EstimationOf"] == "Parameters":
3688 selfA._parameters["StoreInternalVariables"] = True
3689 selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
3691 L = Xb.size
3692 Alpha = selfA._parameters["Alpha"]
3693 Beta = selfA._parameters["Beta"]
3694 if selfA._parameters["Kappa"] == 0:
3695 if selfA._parameters["EstimationOf"] == "State":
3696 Kappa = 0
3697 elif selfA._parameters["EstimationOf"] == "Parameters":
3698 Kappa = 3 - L
3699 else:
3700 Kappa = selfA._parameters["Kappa"]
3701 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3702 Gamma = math.sqrt( L + Lambda )
3704 Ww = []
3705 Ww.append( 0. )
3706 for i in range(2*L):
3707 Ww.append( 1. / (2.*(L + Lambda)) )
3709 Wm = numpy.array( Ww )
3710 Wm[0] = Lambda / (L + Lambda)
3711 Wc = numpy.array( Ww )
3712 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
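# Scaled unscented weights: Wm weights the sigma-point means and Wc the covariances,
# with the usual (1 - Alpha^2 + Beta) correction on the central point of Wc.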
3715 Hm = HO["Direct"].appliedControledFormTo
3717 if selfA._parameters["EstimationOf"] == "State":
3718 Mm = EM["Direct"].appliedControledFormTo
3720 if CM is not None and "Tangent" in CM and U is not None:
3721 Cm = CM["Tangent"].asMatrix(Xb)
3722 else:
3723 Cm = None
3725 # Observation duration and sizes
3726 if hasattr(Y,"stepnumber"):
3727 duration = Y.stepnumber()
3728 __p = numpy.cumprod(Y.shape())[-1]
3729 else:
3730 duration = 2
3731 __p = numpy.array(Y).size
3733 # Precompute the inversions of B and R
3734 if selfA._parameters["StoreInternalVariables"] \
3735 or selfA._toStore("CostFunctionJ") \
3736 or selfA._toStore("CostFunctionJb") \
3737 or selfA._toStore("CostFunctionJo") \
3738 or selfA._toStore("CurrentOptimum") \
3739 or selfA._toStore("APosterioriCovariance"):
3740 BI = B.getI()
3741 RI = R.getI()
3743 __n = Xb.size
3745 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3746 Xn = Xb
3747 if hasattr(B,"asfullmatrix"):
3748 Pn = B.asfullmatrix(__n)
3749 else:
3750 Pn = B
3751 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3752 selfA.StoredVariables["Analysis"].store( Xb )
3753 if selfA._toStore("APosterioriCovariance"):
3754 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3755 elif selfA._parameters["nextStep"]:
3756 Xn = selfA._getInternalState("Xn")
3757 Pn = selfA._getInternalState("Pn")
3759 if selfA._parameters["EstimationOf"] == "Parameters":
3760 XaMin = Xn
3761 previousJMinimum = numpy.finfo(float).max
3763 for step in range(duration-1):
3764 if hasattr(Y,"store"):
3765 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3766 else:
3767 Ynpu = numpy.ravel( Y ).reshape((__p,1))
3769 if U is not None:
3770 if hasattr(U,"store") and len(U)>1:
3771 Un = numpy.ravel( U[step] ).reshape((-1,1))
3772 elif hasattr(U,"store") and len(U)==1:
3773 Un = numpy.ravel( U[0] ).reshape((-1,1))
3774 else:
3775 Un = numpy.ravel( U ).reshape((-1,1))
3776 else:
3777 Un = None
3779 Pndemi = numpy.linalg.cholesky(Pn)
3780 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
3781 nbSpts = 2*Xn.size+1
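# The 2L+1 sigma points are the current mean plus and minus the columns of the
# Cholesky factor of Pn scaled by Gamma; they propagate mean and covariance
# through the nonlinear operators.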
3783 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3784 for point in range(nbSpts):
3785 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
3787 XEtnnp = []
3788 for point in range(nbSpts):
3789 if selfA._parameters["EstimationOf"] == "State":
3790 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
3791 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplicate!
3792 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
3793 XEtnnpi = XEtnnpi + Cm * Un
3794 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3795 XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
3796 elif selfA._parameters["EstimationOf"] == "Parameters":
3797 # --- > By principle, M = Id, Q = 0
3798 XEtnnpi = Xnp[:,point]
3799 XEtnnp.append( XEtnnpi )
3800 XEtnnp = numpy.hstack( XEtnnp )
3802 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
3804 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3805 Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
3807 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
3808 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
3809 for point in range(nbSpts):
3810 Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
3812 if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
3813 Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
3814 else:
3815 Pnmdemi = numpy.linalg.cholesky(Pnm)
3817 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
3819 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3820 for point in range(nbSpts):
3821 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
3823 Ynnp = []
3824 for point in range(nbSpts):
3825 if selfA._parameters["EstimationOf"] == "State":
3826 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
3827 elif selfA._parameters["EstimationOf"] == "Parameters":
3828 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
3829 Ynnp.append( Ynnpi )
3830 Ynnp = numpy.hstack( Ynnp )
3832 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
3834 Pyyn = R
3835 Pxyn = 0.
3836 for point in range(nbSpts):
3837 Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
3838 Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
3840 _Innovation = Ynpu - Yncm
3841 if selfA._parameters["EstimationOf"] == "Parameters":
3842 if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this is a duplicate!
3843 _Innovation = _Innovation - Cm * Un
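# Unscented gain from the sigma-point statistics: Kn = Pxyn * Pyyn^-1.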
3845 Kn = Pxyn * Pyyn.I
3846 Xn = Xncm + Kn * _Innovation
3847 Pn = Pnm - Kn * Pyyn * Kn.T
3849 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3850 Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
3852 Xa = Xn # Pointer
3853 #--------------------------
3854 selfA._setInternalState("Xn", Xn)
3855 selfA._setInternalState("Pn", Pn)
3856 #--------------------------
3858 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3859 # ---> with analysis
3860 selfA.StoredVariables["Analysis"].store( Xa )
3861 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3862 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
3863 if selfA._toStore("InnovationAtCurrentAnalysis"):
3864 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3865 # ---> with current state
3866 if selfA._parameters["StoreInternalVariables"] \
3867 or selfA._toStore("CurrentState"):
3868 selfA.StoredVariables["CurrentState"].store( Xn )
3869 if selfA._toStore("ForecastState"):
3870 selfA.StoredVariables["ForecastState"].store( Xncm )
3871 if selfA._toStore("ForecastCovariance"):
3872 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
3873 if selfA._toStore("BMA"):
3874 selfA.StoredVariables["BMA"].store( Xncm - Xa )
3875 if selfA._toStore("InnovationAtCurrentState"):
3876 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3877 if selfA._toStore("SimulatedObservationAtCurrentState") \
3878 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3879 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
3881 if selfA._parameters["StoreInternalVariables"] \
3882 or selfA._toStore("CostFunctionJ") \
3883 or selfA._toStore("CostFunctionJb") \
3884 or selfA._toStore("CostFunctionJo") \
3885 or selfA._toStore("CurrentOptimum") \
3886 or selfA._toStore("APosterioriCovariance"):
3887 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3888 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
3889 J = Jb + Jo
3890 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3891 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3892 selfA.StoredVariables["CostFunctionJ" ].store( J )
3894 if selfA._toStore("IndexOfOptimum") \
3895 or selfA._toStore("CurrentOptimum") \
3896 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3897 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3898 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3899 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3900 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3901 if selfA._toStore("IndexOfOptimum"):
3902 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3903 if selfA._toStore("CurrentOptimum"):
3904 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3905 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3906 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3907 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3908 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3909 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3910 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3911 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3912 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3913 if selfA._toStore("APosterioriCovariance"):
3914 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3915 if selfA._parameters["EstimationOf"] == "Parameters" \
3916 and J < previousJMinimum:
3917 previousJMinimum = J
3918 XaMin = Xa
3919 if selfA._toStore("APosterioriCovariance"):
3920 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3922 # Additional final storage of the optimum, for parameter estimation
3923 # ----------------------------------------------------------------------
3924 if selfA._parameters["EstimationOf"] == "Parameters":
3925 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3926 selfA.StoredVariables["Analysis"].store( XaMin )
3927 if selfA._toStore("APosterioriCovariance"):
3928 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3929 if selfA._toStore("BMA"):
3930 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3932 return 0
3934 # ==============================================================================
3935 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3936 """
3937 Unscented Kalman Filter
3938 """
3939 if selfA._parameters["EstimationOf"] == "Parameters":
3940 selfA._parameters["StoreInternalVariables"] = True
3942 L = Xb.size
3943 Alpha = selfA._parameters["Alpha"]
3944 Beta = selfA._parameters["Beta"]
3945 if selfA._parameters["Kappa"] == 0:
3946 if selfA._parameters["EstimationOf"] == "State":
3947 Kappa = 0
3948 elif selfA._parameters["EstimationOf"] == "Parameters":
3949 Kappa = 3 - L
3950 else:
3951 Kappa = selfA._parameters["Kappa"]
3952 Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
3953 Gamma = math.sqrt( L + Lambda )
3955 Ww = []
3956 Ww.append( 0. )
3957 for i in range(2*L):
3958 Ww.append( 1. / (2.*(L + Lambda)) )
3960 Wm = numpy.array( Ww )
3961 Wm[0] = Lambda / (L + Lambda)
3962 Wc = numpy.array( Ww )
3963 Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
3966 Hm = HO["Direct"].appliedControledFormTo
3968 if selfA._parameters["EstimationOf"] == "State":
3969 Mm = EM["Direct"].appliedControledFormTo
3971 if CM is not None and "Tangent" in CM and U is not None:
3972 Cm = CM["Tangent"].asMatrix(Xb)
3973 else:
3974 Cm = None
3976 # Observation duration and sizes
3977 if hasattr(Y,"stepnumber"):
3978 duration = Y.stepnumber()
3979 __p = numpy.cumprod(Y.shape())[-1]
3980 else:
3981 duration = 2
3982 __p = numpy.array(Y).size
3984 # Precompute the inversions of B and R
3985 if selfA._parameters["StoreInternalVariables"] \
3986 or selfA._toStore("CostFunctionJ") \
3987 or selfA._toStore("CostFunctionJb") \
3988 or selfA._toStore("CostFunctionJo") \
3989 or selfA._toStore("CurrentOptimum") \
3990 or selfA._toStore("APosterioriCovariance"):
3991 BI = B.getI()
3992 RI = R.getI()
3994 __n = Xb.size
3996 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3997 Xn = Xb
3998 if hasattr(B,"asfullmatrix"):
3999 Pn = B.asfullmatrix(__n)
4000 else:
4001 Pn = B
4002 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4003 selfA.StoredVariables["Analysis"].store( Xb )
4004 if selfA._toStore("APosterioriCovariance"):
4005 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4006 elif selfA._parameters["nextStep"]:
4007 Xn = selfA._getInternalState("Xn")
4008 Pn = selfA._getInternalState("Pn")
4010 if selfA._parameters["EstimationOf"] == "Parameters":
4011 XaMin = Xn
4012 previousJMinimum = numpy.finfo(float).max
4014 for step in range(duration-1):
4015 if hasattr(Y,"store"):
4016 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4017 else:
4018 Ynpu = numpy.ravel( Y ).reshape((__p,1))
4020 if U is not None:
4021 if hasattr(U,"store") and len(U)>1:
4022 Un = numpy.ravel( U[step] ).reshape((-1,1))
4023 elif hasattr(U,"store") and len(U)==1:
4024 Un = numpy.ravel( U[0] ).reshape((-1,1))
4025 else:
4026 Un = numpy.ravel( U ).reshape((-1,1))
4027 else:
4028 Un = None
4030 Pndemi = numpy.linalg.cholesky(Pn)
4031 Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4032 nbSpts = 2*Xn.size+1
4034 XEtnnp = []
4035 for point in range(nbSpts):
4036 if selfA._parameters["EstimationOf"] == "State":
4037 XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
4038 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplicate!
4039 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4040 XEtnnpi = XEtnnpi + Cm * Un
4041 elif selfA._parameters["EstimationOf"] == "Parameters":
4042 # --- > By principle, M = Id, Q = 0
4043 XEtnnpi = Xnp[:,point]
4044 XEtnnp.append( XEtnnpi )
4045 XEtnnp = numpy.hstack( XEtnnp )
4047 Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
4049 if selfA._parameters["EstimationOf"] == "State": Pnm = Q
4050 elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4051 for point in range(nbSpts):
4052 Pnm += Wc[point] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
4054 Pnmdemi = numpy.linalg.cholesky(Pnm)
4056 Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
4058 Ynnp = []
4059 for point in range(nbSpts):
4060 if selfA._parameters["EstimationOf"] == "State":
4061 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
4062 elif selfA._parameters["EstimationOf"] == "Parameters":
4063 Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
4064 Ynnp.append( Ynnpi )
4065 Ynnp = numpy.hstack( Ynnp )
4067 Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
4069 Pyyn = R
4070 Pxyn = 0.
4071 for point in range(nbSpts):
4072 Pyyn += Wc[point] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
4073 Pxyn += Wc[point] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
4075 _Innovation = Ynpu - Yncm
4076 if selfA._parameters["EstimationOf"] == "Parameters":
4077 if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this is a duplicate!
4078 _Innovation = _Innovation - Cm * Un
4080 Kn = Pxyn * Pyyn.I
4081 Xn = Xncm + Kn * _Innovation
4082 Pn = Pnm - Kn * Pyyn * Kn.T
4083 Xa = Xn # Pointer
4085 #--------------------------
4086 selfA._setInternalState("Xn", Xn)
4087 selfA._setInternalState("Pn", Pn)
4088 #--------------------------
4090 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4091 # ---> with analysis
4092 selfA.StoredVariables["Analysis"].store( Xa )
4093 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4094 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4095 if selfA._toStore("InnovationAtCurrentAnalysis"):
4096 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4097 # ---> with current state
4098 if selfA._parameters["StoreInternalVariables"] \
4099 or selfA._toStore("CurrentState"):
4100 selfA.StoredVariables["CurrentState"].store( Xn )
4101 if selfA._toStore("ForecastState"):
4102 selfA.StoredVariables["ForecastState"].store( Xncm )
4103 if selfA._toStore("ForecastCovariance"):
4104 selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4105 if selfA._toStore("BMA"):
4106 selfA.StoredVariables["BMA"].store( Xncm - Xa )
4107 if selfA._toStore("InnovationAtCurrentState"):
4108 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4109 if selfA._toStore("SimulatedObservationAtCurrentState") \
4110 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4111 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4113 if selfA._parameters["StoreInternalVariables"] \
4114 or selfA._toStore("CostFunctionJ") \
4115 or selfA._toStore("CostFunctionJb") \
4116 or selfA._toStore("CostFunctionJo") \
4117 or selfA._toStore("CurrentOptimum") \
4118 or selfA._toStore("APosterioriCovariance"):
4119 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
4120 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4121 J = Jb + Jo
4122 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4123 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4124 selfA.StoredVariables["CostFunctionJ" ].store( J )
4126 if selfA._toStore("IndexOfOptimum") \
4127 or selfA._toStore("CurrentOptimum") \
4128 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4129 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4130 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4131 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4132 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4133 if selfA._toStore("IndexOfOptimum"):
4134 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4135 if selfA._toStore("CurrentOptimum"):
4136 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4137 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4138 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4139 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4140 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4141 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4142 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4143 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4144 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4145 if selfA._toStore("APosterioriCovariance"):
4146 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4147 if selfA._parameters["EstimationOf"] == "Parameters" \
4148 and J < previousJMinimum:
4149 previousJMinimum = J
4150 XaMin = Xa
4151 if selfA._toStore("APosterioriCovariance"):
4152 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4154 # Additional final storage of the optimum, for parameter estimation
4155 # ----------------------------------------------------------------------
4156 if selfA._parameters["EstimationOf"] == "Parameters":
4157 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4158 selfA.StoredVariables["Analysis"].store( XaMin )
4159 if selfA._toStore("APosterioriCovariance"):
4160 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4161 if selfA._toStore("BMA"):
4162 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4164 return 0
4166 # ==============================================================================
4167 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4168 """
4169 3DVAR variational analysis with no inversion of B
4170 """
4176 Hm = HO["Direct"].appliedTo
4177 Ha = HO["Adjoint"].appliedInXTo
4179 # Precompute the transformations of B and R
4180 BT = B.getT()
4181 RI = R.getI()
4183 # Starting point of the optimization
4184 Xini = numpy.zeros(Xb.shape)
4186 # Definition of the cost function
4187 # ------------------------------
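# Change of variable v with X = Xb + B*v: the background term becomes
# Jb = 1/2 v^T B^T v (= 1/2 v^T B v for symmetric B), so B is never inverted
# inside the cost function, as announced in the docstring.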
4188 def CostFunction(v):
4189 _V = numpy.asmatrix(numpy.ravel( v )).T
4190 _X = Xb + B * _V
4191 if selfA._parameters["StoreInternalVariables"] or \
4192 selfA._toStore("CurrentState") or \
4193 selfA._toStore("CurrentOptimum"):
4194 selfA.StoredVariables["CurrentState"].store( _X )
4195 _HX = Hm( _X )
4196 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4197 _Innovation = Y - _HX
4198 if selfA._toStore("SimulatedObservationAtCurrentState") or \
4199 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4200 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4201 if selfA._toStore("InnovationAtCurrentState"):
4202 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4204 Jb = float( 0.5 * _V.T * BT * _V )
4205 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
4206 J = Jb + Jo
4208 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4209 selfA.StoredVariables["CostFunctionJb"].store( Jb )
4210 selfA.StoredVariables["CostFunctionJo"].store( Jo )
4211 selfA.StoredVariables["CostFunctionJ" ].store( J )
4212 if selfA._toStore("IndexOfOptimum") or \
4213 selfA._toStore("CurrentOptimum") or \
4214 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4215 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4216 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4217 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4218 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4219 if selfA._toStore("IndexOfOptimum"):
4220 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4221 if selfA._toStore("CurrentOptimum"):
4222 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4223 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4224 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4225 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4226 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4227 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4228 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4229 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4230 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4231 return J
4233 def GradientOfCostFunction(v):
4234 _V = numpy.asmatrix(numpy.ravel( v )).T
4235 _X = Xb + B * _V
4236 _HX = Hm( _X )
4237 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
4238 GradJb = BT * _V
4239 GradJo = - Ha( (_X, RI * (Y - _HX)) )
4240 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4241 return GradJ
4243 # Minimization of the functional
4244 # --------------------------------
4245 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4247 if selfA._parameters["Minimizer"] == "LBFGSB":
4248 if "0.19" <= scipy.version.version <= "1.1.0":
4249 import lbfgsbhlt as optimiseur
4251 import scipy.optimize as optimiseur
4252 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4253 func = CostFunction,
4255 fprime = GradientOfCostFunction,
4257 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4258 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
4259 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
4260 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4261 iprint = selfA._parameters["optiprint"],
4263 nfeval = Informations['funcalls']
4264 rc = Informations['warnflag']
4265 elif selfA._parameters["Minimizer"] == "TNC":
4266 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4267 func = CostFunction,
4269 fprime = GradientOfCostFunction,
4271 bounds = RecentredBounds(selfA._parameters["Bounds"], Xb),
4272 maxfun = selfA._parameters["MaximumNumberOfSteps"],
4273 pgtol = selfA._parameters["ProjectedGradientTolerance"],
4274 ftol = selfA._parameters["CostDecrementTolerance"],
4275 messages = selfA._parameters["optmessages"],
4277 elif selfA._parameters["Minimizer"] == "CG":
4278 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4281 fprime = GradientOfCostFunction,
4283 maxiter = selfA._parameters["MaximumNumberOfSteps"],
4284 gtol = selfA._parameters["GradientNormTolerance"],
4285 disp = selfA._parameters["optdisp"],
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        # The stored current states are already physical states x = Xb + B v
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        # The optimizer returns the control variable v, mapped back to x
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieving the analysis
    # -----------------------
    Xa = numpy.ravel( Minimum )
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
4330 if selfA._toStore("OMA") or \
4331 selfA._toStore("SigmaObs2") or \
4332 selfA._toStore("SimulationQuantiles") or \
4333 selfA._toStore("SimulatedObservationAtOptimum"):
4334 if selfA._toStore("SimulatedObservationAtCurrentState"):
4335 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4336 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4337 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This verification is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A ) # Succeeds only if A is symmetric positive-definite
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
4376 if selfA._toStore("APosterioriCovariance"):
4377 selfA.StoredVariables["APosterioriCovariance"].store( A )
4378 if selfA._toStore("JacobianMatrixAtOptimum"):
4379 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4380 if selfA._toStore("KalmanGainAtOptimum"):
4381 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4382 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4383 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    # Additional calculations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb # Innovation: observations minus simulated background
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
4394 if selfA._toStore("BMA"):
4395 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4396 if selfA._toStore("OMA"):
4397 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4398 if selfA._toStore("OMB"):
4399 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
4400 if selfA._toStore("SigmaObs2"):
4401 TraceR = R.trace(Y.size)
4402 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
4403 if selfA._toStore("MahalanobisConsistency"):
4404 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4405 if selfA._toStore("SimulationQuantiles"):
4406 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4407 if selfA._toStore("SimulatedObservationAtBackground"):
4408 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
4409 if selfA._toStore("SimulatedObservationAtOptimum"):
4410 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
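    #
    # Minimal numerical sketch (illustration only, with arbitrary 2x2
    # matrices that are not part of the ADAO API): it checks that the a
    # posteriori covariance assembled column by column, as in the
    # "HessienneI" loop above, matches the direct formula
    # A = (B^{-1} + Ht R^{-1} H)^{-1}.
    _B = numpy.matrix([[1., 0.], [0., 2.]])
    _R = numpy.matrix([[0.5, 0.], [0., 0.5]])
    _H = numpy.matrix([[1., 0.], [1., 1.]])
    _BI, _RI = _B.I, _R.I
    _HessienneI = []
    for i in range(2):
        _ee    = numpy.matrix(numpy.zeros(2)).T
        _ee[i] = 1.
        # Column i of B^{-1} + Ht R^{-1} H, obtained by applying it to the
        # basis vector _ee; rows and columns coincide here by symmetry
        _HessienneI.append( numpy.ravel( _BI * _ee + _H.T * (_RI * (_H * _ee)) ) )
    _A1 = numpy.matrix( _HessienneI ).I
    _A2 = (_BI + _H.T * _RI * _H).I
    print("A assembled column by column:\n%s"%_A1)
    print("A by the direct formula     :\n%s"%_A2)
    print("Maximal difference          : %.2e"%numpy.max(numpy.abs(_A1 - _A2)))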