# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
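
# Illustrative sketch (not part of the library API): how a worker "triplet"
# for ExecuteFunction can be assembled. The module path, module name and
# function name below are hypothetical placeholders.
def _exampleExecuteFunctionTriplet( X ):
    "Builds a (state, extra args, function description) triplet"
    funcrepr = {
        "__userFunction__path" : "/some/user/path",  # directory holding the module (assumed)
        "__userFunction__modl" : "mymodule",         # module file name without extension (assumed)
        "__userFunction__name" : "myFunc",           # callable defined inside that module (assumed)
        }
    return (X, None, funcrepr)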
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface for defining approximated operators.
    When creating an object from a function "Function", one obtains an object
    providing the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    either by the multiplicative increment "increment", which defaults to 1%,
    or by the fixed increment "dX", which is multiplied by "increment" (hence
    expressed in %), and centered FD are used if the boolean "centeredDF" is
    true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing computations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction computations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing computations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing computations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of computation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already computed, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Computation of the direct operator using the provided function.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        logging.debug("FDA Computing DirectOperator (explicitly)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian by finite differences,
        i.e. the gradient of H at X. Directional finite differences are used
        around the point X, given as a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dX[i])
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], building X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by dX[i]
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Starting Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already computed, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Computing Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Computing Jacobian (explicitly)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_dX) ) # Increment norm, consistent with __doublon__ applied to _dX
        #
        logging.debug("FDA End of Jacobian computation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent operator using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Return the matrix form when the second argument is None
            # --------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Compute the linearized value of H at X applied to dX
            # -----------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint operator using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed to the user function here.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Return the transposed matrix form when the second argument is None
            # -------------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Compute the adjoint value at X applied to Y
            # -------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
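
# Illustrative sketch (not part of the library API): typical use of
# FDApproximation on a toy quadratic model. The function "simpleH" is a
# hypothetical example, not an operator provided by the library.
def _exampleFDApproximation():
    "Builds the FD operators for a toy model and tests the adjoint identity"
    def simpleH( x ):
        return numpy.ravel( numpy.asarray( x )**2 )
    FDA = FDApproximation( Function = simpleH, increment = 0.01, centeredDF = True )
    X   = numpy.array([1., 2., 3.])
    dX  = numpy.array([1.e-3, 2.e-3, 3.e-3])
    Y   = numpy.ones(3)
    TdX = FDA.TangentOperator( (X, dX) )  # ~ H'(X) dX
    AtY = FDA.AdjointOperator( (X, Y) )   # ~ H'(X)^T Y
    # The scalar products <H'(X) dX, Y> and <dX, H'(X)^T Y> should coincide
    return numpy.dot(numpy.ravel(TdX), Y), numpy.dot(dX, numpy.ravel(AtY))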
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
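
# Illustrative sketch (not part of the library API): drawing a 5-member
# ensemble around a 3-component background state with a diagonal covariance.
def _exampleCenteredPerturbations():
    "Returns a 3x5 ensemble whose columns fluctuate around the center"
    center = numpy.array([1., 2., 3.])
    cov    = 0.1 * numpy.identity(3)
    return EnsembleOfCenteredPerturbations( center, cov, 5 )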
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter (anomalies of rank _nbmembers-1)"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB, and consistent with the code of PS with
        eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
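
# Illustrative sketch (not part of the library API): same kind of sampling,
# but through the exactly centered random-anomaly construction, with SVD.
def _exampleBackgroundPerturbations():
    "Returns a 3x5 ensemble built from exactly centered random anomalies"
    center = numpy.array([1., 2., 3.])
    cov    = 0.1 * numpy.identity(3)
    return EnsembleOfBackgroundPerturbations( center, cov, 5, _withSVD = True )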
# ==============================================================================
def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
    "Returns the centered anomalies from a StateSize*NbOfMembers ensemble"
    if OptMean is None:
        __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
    else:
        __Em = numpy.ravel(OptMean).reshape((-1,1))
    #
    return Normalisation * (numpy.asarray(Ensemble) - __Em)
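
# Illustrative sketch (not part of the library API): the anomalies of an
# ensemble sum to (numerically) zero along the member axis.
def _exampleAnomaliesAreCentered():
    "Returns the maximal absolute row sum of the anomalies, close to 0"
    E = numpy.random.normal(size=(3,10))
    A = EnsembleOfAnomalies( E )
    return numpy.abs(A.sum(axis=1)).max()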
# ==============================================================================
def EnsembleErrorCovariance( Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
    if __quick:
        # Fast covariance, but rarely positive definite
        __Covariance = numpy.cov(Ensemble)
    else:
        # Result often identical to numpy.cov, but more robust
        __n, __m = numpy.asarray(Ensemble).shape
        __Anomalies = EnsembleOfAnomalies( Ensemble )
        # Empirical estimate
        __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
        # Ensure symmetry
        __Covariance = (__Covariance + __Covariance.T) * 0.5
        # Ensure positivity
        __epsilon    = mpr*numpy.trace(__Covariance)
        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
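
# Illustrative sketch (not part of the library API): the robust estimate is
# essentially numpy.cov plus a small diagonal regularization of size mpr*trace.
def _exampleEnsembleCovariance():
    "Compares the quick and the regularized empirical covariance estimates"
    E = numpy.random.normal(size=(3,100))
    return EnsembleErrorCovariance( E, __quick = True ), EnsembleErrorCovariance( E )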
# ==============================================================================
def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
    if hasattr(__Covariance,"assparsematrix"):
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
            # Handling of a covariance that is zero or almost zero
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
            # Handling of a covariance that is zero or almost zero
            return __Ensemble
    else:
        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
            # Handling of a covariance that is zero or almost zero
            return __Ensemble
        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
            # Handling of a covariance that is zero or almost zero
            return __Ensemble
    #
    __n, __m = __Ensemble.shape
    if __Seed is not None: numpy.random.seed(__Seed)
    #
    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
        # Handling of a covariance that is a multiple of the identity
        __zero = 0.
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
    #
    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
        # Handling of a diagonal covariance with non-identical variances
        __zero = numpy.zeros(__n)
        __std  = numpy.sqrt(__Covariance.assparsematrix())
        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
    #
    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
        # Handling of a full covariance
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
    #
    elif isinstance(__Covariance, numpy.ndarray):
        # Handling of a full numpy covariance, knowing that this branch is reached last
        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
    #
    else:
        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
    #
    return __Ensemble
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
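
# Illustrative sketch (not part of the library API): multiplicative inflation
# of ensemble anomalies leaves the ensemble mean unchanged and stretches the
# spread around it by the given factor.
def _exampleAnomaliesInflation():
    "Returns an ensemble whose anomalies are inflated by a factor 1.5"
    E = numpy.random.normal(size=(3,10))
    return CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.5 )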
# ==============================================================================
def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
    "A posteriori quantiles estimation (selfA is modified)"
    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
    #
    # Handling of the bounds
    if "StateBoundsForQuantiles" in selfA._parameters:
        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Priority
    elif "Bounds" in selfA._parameters:
        LBounds = selfA._parameters["Bounds"] # Reasonable default
    else:
        LBounds = None
    if LBounds is not None:
        def NoneRemove(paire):
            bmin, bmax = paire
            if bmin is None: bmin = numpy.finfo('float').min
            if bmax is None: bmax = numpy.finfo('float').max
            return [bmin, bmax]
        LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
    #
    # Sampling of the states
    YfQ = None
    EXr = None
    if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
        HXa = numpy.matrix(numpy.ravel( HXa )).T
    for i in range(nbsamples):
        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
            dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
            if LBounds is not None: # "EstimateProjection" by default
                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
            dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
            Yr = HXa + dYr
            if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
            Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
            if LBounds is not None: # "EstimateProjection" by default
                Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
                Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
            Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
        else:
            raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
        #
        if YfQ is None:
            YfQ = Yr
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
        else:
            YfQ = numpy.hstack((YfQ,Yr))
            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
    #
    # Extraction of the quantiles
    YfQ.sort(axis=-1)
    YQ = None
    for quantile in selfA._parameters["Quantiles"]:
        if not (0. <= float(quantile) <= 1.): continue
        indice = int(nbsamples * float(quantile) - 1./nbsamples)
        if YQ is None: YQ = YfQ[:,indice]
        else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
    selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SampledStateForQuantiles"):
        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    #
    return 0
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    Ensemble Kalman Smoother (EnKS)
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations greater than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    #
    # Initial direct computation (memorization is preferred over recomputation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is applied twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm * Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            mS    = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            #
            sEL[LagL] = EL
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
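            #
            # Sketch of what happens above: the same weight/transform matrix
            # wTU, computed from the newest observation, is applied both to the
            # current ensemble and to each of the L stored lagged ensembles,
            # which is the core of the fixed-lag EnKS update.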
            #
            # Preserve the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Store the last analyses that are still incompletely updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    # ------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Beware: if Cm is also in M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / math.sqrt(__m-1)
            mS    = mS.reshape((-1,__m)) # For the 1D case
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            #
            EaX   = EaX / math.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
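            # In matrix terms (sketch): with S = R^{-1/2} EaHX / sqrt(m-1) and
            # d = R^{-1/2} (y - Hfm), one has T = (I + S'S)^{-1}, w = T S' d,
            # and the analysis ensemble Xa = Xfm + EaX (w + sqrt(m-1) T^{1/2}),
            # i.e. the deterministic ETKF transform in ensemble space.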
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Eventually Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f       = CostFunction,
                x0      = numpy.zeros(__m),
                fprime  = GradientOfCostFunction,
                args    = (),
                disp    = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Jauge Boc2011
            HXfm = H((Xfm[:,None], Un)) # Eventually Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f       = CostFunction,
                x0      = numpy.zeros(__m),
                fprime  = GradientOfCostFunction,
                args    = (),
                disp    = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Jauge Boc2015
            HXfm = H((Xfm[:,None], Un)) # Eventually Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f       = CostFunction,
                x0      = numpy.zeros(__m),
                fprime  = GradientOfCostFunction,
                args    = (),
                disp    = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Jauge Boc2016
            HXfm = H((Xfm[:,None], Un)) # Eventually Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f       = CostFunction,
                x0      = numpy.zeros(__m),
                fprime  = GradientOfCostFunction,
                args    = (),
                disp    = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
1148 if selfA._parameters["StoreInternalVariables"] \
1149 or selfA._toStore("CostFunctionJ") \
1150 or selfA._toStore("CostFunctionJb") \
1151 or selfA._toStore("CostFunctionJo") \
1152 or selfA._toStore("CurrentOptimum") \
1153 or selfA._toStore("APosterioriCovariance"):
1154 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1155 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1157 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1158 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1159 selfA.StoredVariables["CostFunctionJ" ].store( J )
1161 if selfA._toStore("IndexOfOptimum") \
1162 or selfA._toStore("CurrentOptimum") \
1163 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1164 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1165 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1166 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1167 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1168 if selfA._toStore("IndexOfOptimum"):
1169 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1170 if selfA._toStore("CurrentOptimum"):
1171 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1172 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1173 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1174 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1175 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1176 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1177 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1178 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1179 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1180 if selfA._toStore("APosterioriCovariance"):
1181 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1182 if selfA._parameters["EstimationOf"] == "Parameters" \
1183 and J < previousJMinimum:
1184 previousJMinimum = J
1186 if selfA._toStore("APosterioriCovariance"):
1187 covarianceXaMin = Pn
1188 # ---> Pour les smoothers
1189 if selfA._toStore("CurrentEnsembleState"):
1190 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1192 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1193 # ----------------------------------------------------------------------
1194 if selfA._parameters["EstimationOf"] == "Parameters":
1195 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1196 selfA.StoredVariables["Analysis"].store( XaMin )
1197 if selfA._toStore("APosterioriCovariance"):
1198 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1199 if selfA._toStore("BMA"):
1200 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
# ==============================================================================
def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Iterative EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    # ------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.identity(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # --- > By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                vy1 = H((vx2, Un)).reshape((__p,1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
                Deltaw = - numpy.linalg.solve(mH,GradJ)
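                # Sketch: this is a Gauss-Newton step in the ensemble weight
                # space w, solving (I + Y'R^{-1}Y) Deltaw = -GradJ and iterating
                # until |Deltaw| falls below _e or _jmax iterations are reached.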
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
1355 if selfA._parameters["StoreInternalVariables"] \
1356 or selfA._toStore("CostFunctionJ") \
1357 or selfA._toStore("CostFunctionJb") \
1358 or selfA._toStore("CostFunctionJo") \
1359 or selfA._toStore("APosterioriCovariance") \
1360 or selfA._toStore("InnovationAtCurrentAnalysis") \
1361 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1362 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1363 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1364 _Innovation = Ynpu - _HXa
1366 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1367 # ---> avec analysis
1368 selfA.StoredVariables["Analysis"].store( Xa )
1369 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1370 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1371 if selfA._toStore("InnovationAtCurrentAnalysis"):
1372 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1373 # ---> avec current state
1374 if selfA._parameters["StoreInternalVariables"] \
1375 or selfA._toStore("CurrentState"):
1376 selfA.StoredVariables["CurrentState"].store( Xn )
1377 if selfA._toStore("ForecastState"):
1378 selfA.StoredVariables["ForecastState"].store( E2 )
1379 if selfA._toStore("BMA"):
1380 selfA.StoredVariables["BMA"].store( E2 - Xa )
1381 if selfA._toStore("InnovationAtCurrentState"):
1382 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1383 if selfA._toStore("SimulatedObservationAtCurrentState") \
1384 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1385 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1387 if selfA._parameters["StoreInternalVariables"] \
1388 or selfA._toStore("CostFunctionJ") \
1389 or selfA._toStore("CostFunctionJb") \
1390 or selfA._toStore("CostFunctionJo") \
1391 or selfA._toStore("CurrentOptimum") \
1392 or selfA._toStore("APosterioriCovariance"):
1393 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1394 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1396 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1397 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1398 selfA.StoredVariables["CostFunctionJ" ].store( J )
1400 if selfA._toStore("IndexOfOptimum") \
1401 or selfA._toStore("CurrentOptimum") \
1402 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1403 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1404 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1405 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1406 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1407 if selfA._toStore("IndexOfOptimum"):
1408 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1409 if selfA._toStore("CurrentOptimum"):
1410 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1411 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1412 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1413 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1414 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1415 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1416 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1417 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1418 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1419 if selfA._toStore("APosterioriCovariance"):
1420 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1421 if selfA._parameters["EstimationOf"] == "Parameters" \
1422 and J < previousJMinimum:
1423 previousJMinimum = J
1425 if selfA._toStore("APosterioriCovariance"):
1426 covarianceXaMin = Pn
1428 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1429 # ----------------------------------------------------------------------
1430 if selfA._parameters["EstimationOf"] == "Parameters":
1431 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1432 selfA.StoredVariables["Analysis"].store( XaMin )
1433 if selfA._toStore("APosterioriCovariance"):
1434 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1435 if selfA._toStore("BMA"):
1436 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precompute the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    # ----------
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Update the tangent operator at the current point
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
        def CostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht * _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb = float( 0.5 * _dX.T * BI * _dX )
            Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J  = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        #
        def GradientOfCostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX = Ht * _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb = BI * _dX
            GradJo = - Ht.T @ (RI * _dInnovation)
            GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the functional
        # ------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
1554 elif selfA._parameters["Minimizer"] == "TNC":
1555 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1556 func = CostFunction,
1557 x0 = numpy.zeros(Xini.size),
1558 fprime = GradientOfCostFunction,
1560 bounds = selfA._parameters["Bounds"],
1561 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1562 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1563 ftol = selfA._parameters["CostDecrementTolerance"],
1564 messages = selfA._parameters["optmessages"],
1566 elif selfA._parameters["Minimizer"] == "CG":
1567 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1569 x0 = numpy.zeros(Xini.size),
1570 fprime = GradientOfCostFunction,
1572 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1573 gtol = selfA._parameters["GradientNormTolerance"],
1574 disp = selfA._parameters["optdisp"],
1577 elif selfA._parameters["Minimizer"] == "NCG":
1578 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1580 x0 = numpy.zeros(Xini.size),
1581 fprime = GradientOfCostFunction,
1583 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1584 avextol = selfA._parameters["CostDecrementTolerance"],
1585 disp = selfA._parameters["optdisp"],
1588 elif selfA._parameters["Minimizer"] == "BFGS":
1589 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1591 x0 = numpy.zeros(Xini.size),
1592 fprime = GradientOfCostFunction,
1594 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1595 gtol = selfA._parameters["GradientNormTolerance"],
1596 disp = selfA._parameters["optdisp"],
1600 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        J      = selfA.StoredVariables["CostFunctionJ" ][-1] # reference cost for the next outer-loop convergence test
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
    #
    # Analysis retrieval
    # ------------------
    Xa = Xr
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        # Build the Hessian of J column by column: Hess = B^{-1} + H' R^{-1} H
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
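    #
    # Note: the two branches above are the algebraically equivalent primal and
    # dual forms of the Kalman gain,
    #     K = B H' (R + H B H')^{-1} = (B^{-1} + H' R^{-1} H)^{-1} H' R^{-1},
    # the first being cheaper when the observation space is smaller than the
    # state space (Y.size <= Xb.size), and conversely for the second.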
    #
    # Additional calculations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0

# ==============================================================================
def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Maximum Likelihood Ensemble Filter
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXaMin = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # reference index for the optimum search over this run
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        #
        #--------------------------
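        # Note: in the MLEF analysis below, the update is sought in ensemble
        # space as x = Xfm + EaX w, where EaX holds the normalized forecast
        # anomalies. Assuming Gaussian errors, the weight vector w minimizes
        #     J(w) = 0.5 * w' w + 0.5 * (Ynpu - H(x(w)))' R^{-1} (Ynpu - H(x(w)))
        # and is obtained by the Newton iterations of the inner while loop.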
        if VariantM == "MLEF13":
            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
            Ua  = numpy.identity(__m)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.identity(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ  = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
                mH     = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
            #
            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
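        #
        # Note: Ta = (I + EaY' R^{-1} EaY)^{-1/2}, computed from mH above, is
        # the symmetric square-root transform that rescales the analysis
        # anomalies so the posterior ensemble spread matches the estimated
        # analysis error covariance, as in square-root ensemble filters.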
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
                if selfA._toStore("IndexOfOptimum"):
                    selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if selfA._toStore("CurrentOptimum"):
                    selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
                if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                    selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
                if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
                if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    #
    # Recovery of the initial data and information
    # --------------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids   = 1./(epsilon+numpy.abs(residus))
    veps    = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
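    #
    # Note: following Hunter & Lange (2000), the check (quantile) loss
    #     rho_q(r) = r * (q - 1{r<0})
    # is smoothed with the parameter epsilon, and at each iteration it is
    # majorized by the quadratic surrogate
    #     S(r) = sum( r^2 * poids ) + (4q - 2) * sum( r )   (up to a constant)
    # with weights poids = 1/(epsilon + |r_last|); repeatedly minimizing S is
    # the majorize-minimize (MM) step that drives the quantile regression fit.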
    #
    # Iterative search
    # ----------------
    iteration = 0
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
        DeriveesT = Derivees.transpose()
        M         = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Beware: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Deviation measure: quantile-weighted sum of the residuals
    # ----------------------------------------------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
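#
# Illustrative sketch (not part of the library): a hedged example of calling
# mmqr for a median (quantile 0.5) linear fit. The callables "func" and
# "fprime" below are hypothetical user-supplied model and Jacobian functions,
# matching the signature expected above.
#
#   import numpy
#   X = numpy.linspace(0., 1., 50)
#   yobs = 2.*X + 1. + numpy.random.normal(0., 0.1, 50)
#   func   = lambda v: v[0]*X + v[1]                           # model values at the data points
#   fprime = lambda v: numpy.array([X, numpy.ones_like(X)]).T  # its (50 x 2) Jacobian
#   variables, Ecart, infos = mmqr(func=func, x0=[1., 0.], fprime=fprime,
#                                  quantile=0.5, y=yobs)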
# ==============================================================================
def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Multi-step and multi-method 3DVAR
    """
    #
    # Initialization
    # --------------
    Xn = numpy.ravel(Xb).reshape((-1,1))
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedTo
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xn )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
            else:                         Pn = B
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn )
    #
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-step loop
    # ---------------
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((-1,1))
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast
            Xn = selfA.StoredVariables["Analysis"][-1]
            Xn_predicted = M( Xn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
        #
        # oneCycle is the mono-step analysis routine chained at each step
        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
    #
    return 0

# ==============================================================================
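# Note: PSAS (Physical-space Statistical Analysis System) solves the 3DVAR
# problem in the dual, observation-sized space. With d = Y - H(Xb) the
# innovation, one minimizes
#     J(w) = 0.5 * w' (H B H' + R) w - w' d
# whose minimizer satisfies (H B H' + R) w = d, and the analysis is then
# recovered in state space as Xa = Xb + B H' w.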
def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR PSAS
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    #
    # Optional use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    Ht = HO["Tangent"].asMatrix(Xb)
    BHT = B * Ht.T
    HBHTpR = R + Ht * BHT
    Innovation = Y - HXb
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Cost function definition
    # ------------------------
    def CostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
        #
        Jb = float( 0.5 * _W.T * HBHTpR * _W )
        Jo = float( - _W.T * Innovation )
        J  = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        GradJb = HBHTpR * _W
        GradJo = - Innovation
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
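    #
    # Note: grad J(w) = (H B H' + R) w - d, so the minimization below solves
    # the dual normal equations; no inverse of B or R is needed during the
    # iterations, which is the point of the PSAS formulation.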
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Analysis retrieval
    # ------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        RI = R.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional calculations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0

# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Stochastic EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Observation duration and sizes
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXaMin = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber() # reference index for the optimum search over this run
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        #--------------------------
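        # Note: both variants below implement the stochastic ("perturbed
        # observations") EnKF analysis: each member is updated with
        #     x_i^a = x_i^f + K (y + eta_i - H(x_i^f)),   eta_i ~ N(0, R)
        # where K approximates Pf H' (H Pf H' + R)^{-1} from the ensemble,
        # either with explicit covariance sums (formula "05") or with anomaly
        # matrices that include the observation perturbations ("16").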
        if VariantM == "KalmanFilterFormula05":
            PfHT, HPfHT = 0., 0.
            for i in range(__m):
                Exfi   = Xn_predicted[:,i].reshape((__n,1)) - Xfm
                Eyfi   = HX_predicted[:,i].reshape((__p,1)) - Hfm
                PfHT  += Exfi * Eyfi.T
                HPfHT += Eyfi * Eyfi.T
            PfHT  = (1./(__m-1)) * PfHT
            HPfHT = (1./(__m-1)) * HPfHT
            Kn    = PfHT * ( R + HPfHT ).I
            del PfHT, HPfHT
            #
            for i in range(__m):
                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
        #--------------------------
        elif VariantM == "KalmanFilterFormula16":
            EpY  = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
            EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
            #
            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
            #
            for i in range(__m):
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
                if selfA._toStore("IndexOfOptimum"):
                    selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if selfA._toStore("CurrentOptimum"):
                    selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
                if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                    selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
                if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
                if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin            = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0

# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Optional use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precompute the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Cost function definition
    # ------------------------
    def CostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J  = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BI * (_X - Xb)
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
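    #
    # Note: this is the classical 3DVAR gradient,
    #     grad J(x) = B^{-1} (x - Xb) - H'(x)^T R^{-1} (Y - H(x)),
    # where the adjoint operator Ha provides the product by H'(x)^T without
    # forming the Jacobian explicitly.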
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Analysis retrieval
    # ------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional calculations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0

# ==============================================================================
def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    4DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedControledFormTo
    Mm = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    def Un(_step):
        if hasattr(U,"store") and 1<=_step<len(U) :
            _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
        elif hasattr(U,"store") and len(U)==1:
            _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
        else:
            _Un = numpy.asmatrix(numpy.ravel( U )).T
        return _Un
    def CmUn(_xn,_un):
        if Cm is not None and _un is not None: # Beware: if Cm is also included in M, it is counted twice!
            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
            _CmUn = _Cm * _un
        else:
            _CmUn = 0.
        return _CmUn
    #
    # Remark: observations are used starting from time step number 1, and are
    # stored in Yo indexed accordingly. Step 0 is therefore not used, since
    # the first stage starts with the observation of step 1.
    #
    # Number of steps identical to the number of observation steps
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Precompute the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Cost function definition
    # ------------------------
    selfA.DirectCalculation = [None,] # Step 0 is not observed
    selfA.DirectInnovation  = [None,] # Step 0 is not observed
    def CostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        selfA.DirectCalculation = [None,]
        selfA.DirectInnovation  = [None,]
        Jo  = 0.
        _Xn = _X
        for step in range(0,duration-1):
            if hasattr(Y,"store"):
                _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
            else:
                _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
            _Un = Un(step)
            #
            # Evolution step
            if selfA._parameters["EstimationOf"] == "State":
                _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
            elif selfA._parameters["EstimationOf"] == "Parameters":
                pass
            #
            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
                _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
            #
            # Difference-to-observations step
            if selfA._parameters["EstimationOf"] == "State":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
            elif selfA._parameters["EstimationOf"] == "Parameters":
                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
            #
            # State storage
            selfA.DirectCalculation.append( _Xn )
            selfA.DirectInnovation.append( _YmHMX )
            #
            # Accumulate the observation part of the cost function
            Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
        #
        J = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        GradJb = BI * (_X - Xb)
        GradJo = 0.
        for step in range(duration-1,0,-1):
            # Retrieve the last stored evolution state
            _Xn = selfA.DirectCalculation.pop()
            # Retrieve the last stored innovation
            _YmHMX = selfA.DirectInnovation.pop()
            # Adjoint computations
            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
            # Gradient computation by adjoint state
            GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
            GradJo = Ma * GradJo               # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
        return GradJ
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
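        # Note: the guard above is a lexicographic string comparison of the
        # SciPy version, and lbfgsbhlt appears to be a locally shipped,
        # patched copy of the L-BFGS-B driver used for the SciPy releases in
        # that range; outside it, the stock scipy.optimize routine is used.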
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
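    # Hm evaluates the (possibly non-linear) observation operator, while Ha
    # applies its adjoint at a given linearization point: Ha( (X, w) ) is
    # understood here as H'(X)^T w, which is what the gradient needs.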
    #
    # Precomputation on B and R (transpose of B and inverse of R: B itself is never inverted)
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
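    # The optimization runs on the control variable v defined by the change
    # of variables X = Xb + B v, so the natural starting point is v = 0,
    # i.e. the background Xb; this is why Xini is a zero vector here, unlike
    # the plain 3DVAR where the state itself is the control variable.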
    #
    # Definition of the cost functional
    # ---------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
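        # In the v variable the functional reads
        #   J(v) = 1/2 v' B v + 1/2 (y - H(Xb + B v))' R^{-1} (y - H(Xb + B v))
        # which equals the usual 3DVAR J(x) = 1/2 (x - xb)' B^{-1} (x - xb) + Jo
        # at x = Xb + B v, but without requiring B^{-1} in the iterations.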
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(v):
        _V  = numpy.asmatrix(numpy.ravel( v )).T
        _X  = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
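    # Chain rule through X = Xb + B v: the v-space gradient of Jb = 1/2 v' B v
    # is B^T v (GradJb), while GradJo is the x-space observation term
    # -H'(X)^T R^{-1} (Y - H(X)) delivered by the adjoint operator Ha.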
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        # The stored states are already in x-space
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        # The optimizer returns the control variable v: map it back to x-space
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
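        # The loop above assembles, column by column, the Hessian of the
        # 3DVAR functional at the optimum, B^{-1} + H'^T R^{-1} H', by
        # applying it to each canonical unit vector _ee; the a posteriori
        # covariance is then its inverse, A = (B^{-1} + H'^T R^{-1} H')^{-1}.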
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
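        # A Cholesky factorization succeeds exactly when A is symmetric
        # positive-definite, so it serves here as an inexpensive SPD test,
        # restricted to debug level to avoid the O(n^3) cost in production.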
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
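    # The two KG formulas are the algebraically equivalent primal and dual
    # forms of the optimal gain, K = B H'^T (R + H' B H'^T)^{-1}
    # = (B^{-1} + H'^T R^{-1} H')^{-1} H'^T R^{-1}; the branch on Y.size
    # versus Xb.size simply inverts the smaller of the two matrices.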
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T # Background simulation, assumed here as in the standard 3DVAR variant
        d   = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
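    # SigmaObs2 below is the scalar observation-error consistency indicator
    #   sigma_obs^2 = d' (y - H(xa)) / Tr(R)
    # computed from the innovation d = y - H(xb) and the analysis residual.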
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
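    # MahalanobisConsistency stores 2 J(xa) / p, where p = d.size is the
    # number of observations: for consistent Gaussian error statistics this
    # ratio is expected to be close to 1.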
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
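    #
    # Minimal self-check sketch (an addition, not part of the original
    # autodiagnostic): on a toy quadratic 3DVAR cost with H = identity and
    # hypothetical unit covariances, the analytic gradient should match a
    # first-order finite-difference expansion of the cost.
    rng = numpy.random.RandomState(1234)
    xb  = numpy.zeros(3)
    yo  = rng.standard_normal(3)
    J   = lambda x: 0.5*numpy.sum((x - xb)**2) + 0.5*numpy.sum((yo - x)**2)
    dJ  = lambda x: (x - xb) - (yo - x)
    x0  = rng.standard_normal(3)
    dx  = 1.e-6*rng.standard_normal(3)
    # Second-order Taylor remainder: should be ~1.e-12 for a correct gradient
    print("  Gradient self-check residual: %.3e"%abs(J(x0+dx) - J(x0) - dJ(x0)@dx))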