1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
    Defines generic numerical objects.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
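#
# Illustrative sketch (not part of the library): this is, schematically, how
# ExecuteFunction is driven from a multiprocessing pool, as done below in
# TangentMatrix. The path, module and function names in "funcrepr" are
# hypothetical examples only.
#
#   import multiprocessing
#   funcrepr = {
#       "__userFunction__path" : "/some/user/dir",     # hypothetical path
#       "__userFunction__modl" : "UserModule",         # hypothetical module
#       "__userFunction__name" : "UserFunction",       # hypothetical callable
#       }
#   _jobs = [ (Xi, None, funcrepr) for Xi in list_of_points ]
#   with multiprocessing.Pool() as pool:
#       _HXs = pool.map( ExecuteFunction, _jobs )      # one H(Xi) per job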
51 # ==============================================================================
52 class FDApproximation(object):
    This class serves as an interface to define approximated operators. When
    creating an object by providing a function "Function", one obtains an
    object exposing the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD approximation is controlled either through the
    multiplicative increment "increment" (1% by default), or through the
    fixed increment "dX" which is multiplied by "increment" (hence in %), and
    centered finite differences are used if the boolean "centeredDF" is true.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
        Direct computation using the provided function.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed to the user function here.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _X = numpy.asmatrix(numpy.ravel( X )).T
180 _HX = numpy.ravel(self.__userFunction( _X ))
184 # ---------------------------------------------------------
185 def TangentMatrix(self, X ):
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences around the point X are used. X is a numpy.matrix.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
        subtracted from the component X[i] to build X_plus_dXi and
        X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
        HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
        by (2*dXi)
        3/ Each result, component by component, becomes a column of the
        Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
        component X[i] to build X_plus_dXi, and the response
        HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by (dXi)
        4/ Each result, component by component, becomes a column of the
        Jacobian
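
        Illustrative example (purely indicative numbers): with X = (2., 3.),
        increment = 0.01 and no fixed dX, the perturbation on the first
        component is dXi = 0.01*2. = 0.02. The centered scheme evaluates
        H(2.02, 3.) and H(1.98, 3.) and the first Jacobian column is
        ( H(2.02, 3.) - H(1.98, 3.) ) / (2*0.02) ; the non-centered scheme
        uses ( H(2.02, 3.) - H(2., 3.) ) / 0.02 instead.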
210 logging.debug("FDA Début du calcul de la Jacobienne")
211 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
212 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
214 if X is None or len(X)==0:
215 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
217 _X = numpy.asmatrix(numpy.ravel( X )).T
219 if self.__dX is None:
220 _dX = self.__increment * _X
222 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
224 if (_dX == 0.).any():
227 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229 _dX = numpy.where( _dX == 0., moyenne, _dX )
231 __alreadyCalculated = False
233 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
234 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236 __alreadyCalculated, __i = True, __alreadyCalculatedP
237 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
239 if __alreadyCalculated:
240 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
241 _Jacobienne = self.__listJPCR[__i]
243 logging.debug("FDA Calcul Jacobienne (explicite)")
244 if self.__centeredDF:
246 if self.__mpEnabled and not self.__mfEnabled:
248 "__userFunction__path" : self.__userFunction__path,
249 "__userFunction__modl" : self.__userFunction__modl,
250 "__userFunction__name" : self.__userFunction__name,
253 for i in range( len(_dX) ):
255 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
256 _X_plus_dXi[i] = _X[i] + _dXi
257 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
258 _X_moins_dXi[i] = _X[i] - _dXi
260 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
261 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263 import multiprocessing
264 self.__pool = multiprocessing.Pool(self.__mpWorkers)
265 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
270 for i in range( len(_dX) ):
271 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273 elif self.__mfEnabled:
275 for i in range( len(_dX) ):
277 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
278 _X_plus_dXi[i] = _X[i] + _dXi
279 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
280 _X_moins_dXi[i] = _X[i] - _dXi
282 _xserie.append( _X_plus_dXi )
283 _xserie.append( _X_moins_dXi )
285 _HX_plusmoins_dX = self.DirectOperator( _xserie )
288 for i in range( len(_dX) ):
289 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
293 for i in range( _dX.size ):
295 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
296 _X_plus_dXi[i] = _X[i] + _dXi
297 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
298 _X_moins_dXi[i] = _X[i] - _dXi
300 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
301 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
303 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
307 if self.__mpEnabled and not self.__mfEnabled:
309 "__userFunction__path" : self.__userFunction__path,
310 "__userFunction__modl" : self.__userFunction__modl,
311 "__userFunction__name" : self.__userFunction__name,
314 _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315 for i in range( len(_dX) ):
316 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
317 _X_plus_dXi[i] = _X[i] + _dX[i]
319 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321 import multiprocessing
322 self.__pool = multiprocessing.Pool(self.__mpWorkers)
323 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
327 _HX = _HX_plus_dX.pop(0)
330 for i in range( len(_dX) ):
331 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333 elif self.__mfEnabled:
335 _xserie.append( _X.A1 )
336 for i in range( len(_dX) ):
337 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
338 _X_plus_dXi[i] = _X[i] + _dX[i]
340 _xserie.append( _X_plus_dXi )
342 _HX_plus_dX = self.DirectOperator( _xserie )
344 _HX = _HX_plus_dX.pop(0)
347 for i in range( len(_dX) ):
348 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
352 _HX = self.DirectOperator( _X )
353 for i in range( _dX.size ):
355 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
356 _X_plus_dXi[i] = _X[i] + _dXi
358 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
363 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
365 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366 while len(self.__listJPCP) > self.__lenghtRJ:
367 self.__listJPCP.pop(0)
368 self.__listJPCI.pop(0)
369 self.__listJPCR.pop(0)
370 self.__listJPPN.pop(0)
371 self.__listJPIN.pop(0)
372 self.__listJPCP.append( copy.copy(_X) )
373 self.__listJPCI.append( copy.copy(_dX) )
374 self.__listJPCR.append( copy.copy(_Jacobienne) )
375 self.__listJPPN.append( numpy.linalg.norm(_X) )
376 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
378 logging.debug("FDA Fin du calcul de la Jacobienne")
382 # ---------------------------------------------------------
383 def TangentOperator(self, paire, **extraArgs ):
        Tangent computation using the Jacobian.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed to the user function here.
            assert len(paire) == 1, "Incorrect length of arguments"
393 assert len(_paire) == 2, "Incorrect number of arguments"
395 assert len(paire) == 2, "Incorrect number of arguments"
398 _Jacobienne = self.TangentMatrix( X )
399 if dX is None or len(dX) == 0:
            # Computation of the matrix form if the second argument is None
402 # -------------------------------------------------------------
403 if self.__mfEnabled: return [_Jacobienne,]
404 else: return _Jacobienne
            # Computation of the linearized value of H at X applied to dX
408 # ------------------------------------------------------
409 _dX = numpy.asmatrix(numpy.ravel( dX )).T
410 _HtX = numpy.dot(_Jacobienne, _dX)
411 if self.__mfEnabled: return [_HtX.A1,]
414 # ---------------------------------------------------------
415 def AdjointOperator(self, paire, **extraArgs ):
        Adjoint computation using the Jacobian.

        NB: the extraArgs are there to ensure call compatibility, but they
        must not be passed to the user function here.
            assert len(paire) == 1, "Incorrect length of arguments"
425 assert len(_paire) == 2, "Incorrect number of arguments"
427 assert len(paire) == 2, "Incorrect number of arguments"
430 _JacobienneT = self.TangentMatrix( X ).T
431 if Y is None or len(Y) == 0:
            # Computation of the matrix form if the second argument is None
434 # -------------------------------------------------------------
435 if self.__mfEnabled: return [_JacobienneT,]
436 else: return _JacobienneT
            # Computation of the adjoint value at X applied to Y
440 # --------------------------------------------------
441 _Y = numpy.asmatrix(numpy.ravel( Y )).T
442 _HaY = numpy.dot(_JacobienneT, _Y)
443 if self.__mfEnabled: return [_HaY.A1,]
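#
# Usage sketch (illustrative only, assuming a user-supplied function "H" that
# maps a state vector to an observation vector): the three approximated
# operators provided by FDApproximation are used as follows.
#
#   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = False )
#   HX  = FDA.DirectOperator( X )              # H(X)
#   TM  = FDA.TangentOperator( (X, None) )     # full Jacobian at X
#   TdX = FDA.TangentOperator( (X, dX) )       # Jacobian at X applied to dX
#   AdY = FDA.AdjointOperator( (X, Y) )        # transposed Jacobian applied to Y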
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of size _nbmembers-1 of centered random states"
450 _bgcenter = numpy.ravel(_bgcenter)[:,None]
452 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
454 if _bgcovariance is None:
455 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460 return BackgroundEnsemble
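#
# Illustrative sketch (not part of the library): generating an ensemble of
# perturbed states around a given center with a given covariance. The sizes
# below are arbitrary example values.
#
#   Xb = numpy.array([1., 2., 3.])
#   B  = 0.25 * numpy.identity(3)
#   E  = EnsembleOfCenteredPerturbations( Xb, B, _nbmembers = 50 )
#   # E has one column per member: E.shape == (3, 50)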
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of size _nbmembers-1 of centered random states"
465 def __CenteredRandomAnomalies(Zr, N):
        Generates a matrix of N random anomalies centered on Zr, following
        MB's handwritten notes and consistent with PS's code with eps = -1
471 Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
478 _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481 if _bgcovariance is None:
482 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
485 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486 _nbctl = _bgcenter.size
487 if _nbmembers > _nbctl:
488 _Z = numpy.concatenate((numpy.dot(
489 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494 BackgroundEnsemble = _bgcenter + _Zca
496 if max(abs(_bgcovariance.flatten())) > 0:
497 _nbctl = _bgcenter.size
498 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500 BackgroundEnsemble = _bgcenter + _Zca
502 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504 return BackgroundEnsemble
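#
# Illustrative sketch (not part of the library): same call pattern as
# EnsembleOfCenteredPerturbations above, but when a covariance is given and
# _withSVD is True (default) the anomalies are built from an SVD of the
# covariance, which is meant to keep the ensemble mean on _bgcenter.
#
#   E = EnsembleOfBackgroundPerturbations( Xb, B, _nbmembers = 50 )
#   # numpy.allclose( E.mean(axis=1), numpy.ravel(Xb) ) expected (up to round-off)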
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
    "Returns the centered anomalies from an ensemble of size StateSize*NumberOfMembers"
510 __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
512 __Em = numpy.ravel(OptMean).reshape((-1,1))
514 return Normalisation * (numpy.asarray(Ensemble) - __Em)
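#
# Illustrative sketch (not part of the library): the anomalies are the member
# deviations from the ensemble mean (or from OptMean when provided), possibly
# rescaled; callers such as etkf/enks below use Normalisation = 1/sqrt(m-1)
# to obtain covariance-ready anomalies.
#
#   E  = numpy.random.rand(3, 50)                       # arbitrary example ensemble
#   A  = EnsembleOfAnomalies( E )                       # shape (3, 50), ~zero row-mean
#   An = EnsembleOfAnomalies( E, None, 1./math.sqrt(49) )
#   # then An @ An.T approximates the ensemble covariance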
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
    "Returns the empirical estimate of the ensemble covariance"
        # Quick covariance estimate, but rarely positive definite
521 __Covariance = numpy.cov(Ensemble)
        # Result often identical to numpy.cov, but more robust
524 __n, __m = numpy.asarray(Ensemble).shape
525 __Anomalies = EnsembleOfAnomalies( Ensemble )
        # Empirical estimate
527 __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
529 __Covariance = (__Covariance + __Covariance.T) * 0.5
        # Ensures positive definiteness
531 __epsilon = mpr*numpy.trace(__Covariance)
532 __Covariance = __Covariance + __epsilon * numpy.identity(__n)
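#
# Illustrative sketch (not part of the library): on the default (robust) path
# the estimate is the usual empirical covariance of the anomalies, then
# symmetrized and regularized to stay positive definite.
#
#   E = numpy.random.rand(3, 50)                  # arbitrary example ensemble
#   C = EnsembleErrorCovariance( E )
#   # C is close to numpy.cov(E), but symmetrized and with a small positive
#   # term added on the diagonal (epsilon = mpr * trace).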
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
    "Adds a perturbation to each member of an ensemble according to a prescribed covariance"
539 if hasattr(__Covariance,"assparsematrix"):
540 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541 # Traitement d'une covariance nulle ou presque
543 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544 # Traitement d'une covariance nulle ou presque
547 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548 # Traitement d'une covariance nulle ou presque
550 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551 # Traitement d'une covariance nulle ou presque
554 __n, __m = __Ensemble.shape
555 if __Seed is not None: numpy.random.seed(__Seed)
557 if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558 # Traitement d'une covariance multiple de l'identité
560 __std = numpy.sqrt(__Covariance.assparsematrix())
561 __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
563 elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564 # Traitement d'une covariance diagonale avec variances non identiques
565 __zero = numpy.zeros(__n)
566 __std = numpy.sqrt(__Covariance.assparsematrix())
567 __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
569 elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570 # Traitement d'une covariance pleine
571 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
573 elif isinstance(__Covariance, numpy.ndarray):
574 # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
575 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
578 raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
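#
# Illustrative sketch (not part of the library): each member of the ensemble
# receives an independent random perturbation drawn with the prescribed
# covariance (scalar, diagonal/vector or full matrix forms are handled above).
#
#   E  = numpy.zeros((3, 50))                     # arbitrary example ensemble
#   Q  = 0.01 * numpy.identity(3)                 # plain numpy covariance
#   Ep = EnsemblePerturbationWithGivenCovariance( E, Q, __Seed = 123 )
#   # Ep.shape == (3, 50), with roughly zero mean and variance ~0.01 per row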
582 # ==============================================================================
583 def CovarianceInflation(
585 InflationType = None,
586 InflationFactor = None,
587 BackgroundCov = None,
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Summary: Hunt 2007, section 2.3.5
594 if InflationFactor is None:
597 InflationFactor = float(InflationFactor)
599 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600 if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
602 if InflationFactor < 1.+mpr:
604 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
606 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607 if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
609 if InflationFactor < 1.+mpr:
611 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
615 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616 if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
618 if InflationFactor < mpr:
620 __n, __m = numpy.asarray(InputCovOrEns).shape
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
623 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
625 elif InflationType == "HybridOnBackgroundCovariance":
626 if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
628 if InflationFactor < mpr:
630 __n, __m = numpy.asarray(InputCovOrEns).shape
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
633 if BackgroundCov is None:
634 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635 if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("The ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
637 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
639 elif InflationType == "Relaxation":
640 raise NotImplementedError("InflationType Relaxation")
643 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
645 return OutputCovOrEns
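#
# Illustrative sketch (not part of the library): multiplicative inflation on
# anomalies rescales the ensemble spread around its mean without moving it,
# following the formula used above:
#   X_inflated = mean(X) + InflationFactor * ( X - mean(X) )
#
#   E  = numpy.random.rand(3, 50)                 # arbitrary example ensemble
#   Ei = CovarianceInflation( E, "MultiplicativeOnBackgroundAnomalies", 1.05 )
#   # Ei has the same mean as E and a per-row standard deviation ~5% larger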
647 # ==============================================================================
648 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
654 H = HO["Direct"].appliedControledFormTo
656 if selfA._parameters["EstimationOf"] == "State":
657 M = EM["Direct"].appliedControledFormTo
659 if CM is not None and "Tangent" in CM and U is not None:
660 Cm = CM["Tangent"].asMatrix(Xb)
    # Precompute the inversions of B and R
    # Observation duration and sizes
668 LagL = selfA._parameters["SmootherLagL"]
669 if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
671 if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations longer than the lag L")
673 duration = Y.stepnumber()
674 __p = numpy.cumprod(Y.shape())[-1]
676 __m = selfA._parameters["NumberOfMembers"]
678 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
680 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
681 selfA.StoredVariables["Analysis"].store( Xb )
682 if selfA._toStore("APosterioriCovariance"):
683 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    # Initial direct computation (memorisation is preferred over recomputation)
687 __seed = numpy.random.get_state()
688 selfB = copy.deepcopy(selfA)
689 selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
690 if VariantM == "EnKS16-KalmanFilterFormula":
691 etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
693 raise ValueError("VariantM has to be chosen in the authorized methods list.")
695 EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
697 EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
698 selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
700 for step in range(LagL,duration-1):
702 sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
705 if hasattr(Y,"store"):
706 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
708 Ynpu = numpy.ravel( Y ).reshape((__p,1))
711 if hasattr(U,"store") and len(U)>1:
712 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
713 elif hasattr(U,"store") and len(U)==1:
714 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
716 Un = numpy.asmatrix(numpy.ravel( U )).T
720 #--------------------------
721 if VariantM == "EnKS16-KalmanFilterFormula":
722 if selfA._parameters["EstimationOf"] == "State": # Forecast
723 EL = M( [(EL[:,i], Un) for i in range(__m)],
725 returnSerieAsArrayMatrix = True )
726 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
727 EZ = H( [(EL[:,i], Un) for i in range(__m)],
729 returnSerieAsArrayMatrix = True )
730 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
731 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
733 elif selfA._parameters["EstimationOf"] == "Parameters":
734 # --- > Par principe, M = Id, Q = 0
735 EZ = H( [(EL[:,i], Un) for i in range(__m)],
737 returnSerieAsArrayMatrix = True )
739 vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
740 vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
742 mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
743 delta = RIdemi @ ( Ynpu - vZm )
744 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
745 vw = mT @ mS.T @ delta
747 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
748 mU = numpy.identity(__m)
749 wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
751 EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
755 for irl in range(LagL): # Lissage des L précédentes analysis
756 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
757 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
758 sEL[irl] = vEm + EX @ wTU
760 # Conservation de l'analyse retrospective d'ordre 0 avant rotation
761 Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
762 if selfA._toStore("APosterioriCovariance"):
765 for irl in range(LagL):
766 sEL[irl] = sEL[irl+1]
768 #--------------------------
770 raise ValueError("VariantM has to be chosen in the authorized methods list.")
772 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
774 selfA.StoredVariables["Analysis"].store( Xa )
775 if selfA._toStore("APosterioriCovariance"):
776 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    # Storage of the last analyses, not yet fully updated
779 for irl in range(LagL):
780 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
781 Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
782 selfA.StoredVariables["Analysis"].store( Xa )
786 # ==============================================================================
787 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
789 Ensemble-Transform EnKF
791 if selfA._parameters["EstimationOf"] == "Parameters":
792 selfA._parameters["StoreInternalVariables"] = True
796 H = HO["Direct"].appliedControledFormTo
798 if selfA._parameters["EstimationOf"] == "State":
799 M = EM["Direct"].appliedControledFormTo
801 if CM is not None and "Tangent" in CM and U is not None:
802 Cm = CM["Tangent"].asMatrix(Xb)
    # Number of steps identical to the number of observation steps
807 # -------------------------------------------------------
808 if hasattr(Y,"stepnumber"):
809 duration = Y.stepnumber()
810 __p = numpy.cumprod(Y.shape())[-1]
813 __p = numpy.array(Y).size
    # Precompute the inversions of B and R
816 # ----------------------------------
817 if selfA._parameters["StoreInternalVariables"] \
818 or selfA._toStore("CostFunctionJ") \
819 or selfA._toStore("CostFunctionJb") \
820 or selfA._toStore("CostFunctionJo") \
821 or selfA._toStore("CurrentOptimum") \
822 or selfA._toStore("APosterioriCovariance"):
825 elif VariantM != "KalmanFilterFormula":
827 if VariantM == "KalmanFilterFormula":
833 __m = selfA._parameters["NumberOfMembers"]
834 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
836 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
837 #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
839 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
840 selfA.StoredVariables["Analysis"].store( Xb )
841 if selfA._toStore("APosterioriCovariance"):
842 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
845 previousJMinimum = numpy.finfo(float).max
847 for step in range(duration-1):
848 if hasattr(Y,"store"):
849 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
851 Ynpu = numpy.ravel( Y ).reshape((__p,1))
854 if hasattr(U,"store") and len(U)>1:
855 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
856 elif hasattr(U,"store") and len(U)==1:
857 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
859 Un = numpy.asmatrix(numpy.ravel( U )).T
863 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
864 Xn = CovarianceInflation( Xn,
865 selfA._parameters["InflationType"],
866 selfA._parameters["InflationFactor"],
869 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
870 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
872 returnSerieAsArrayMatrix = True )
873 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
874 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
876 returnSerieAsArrayMatrix = True )
877 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
878 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
879 Xn_predicted = Xn_predicted + Cm * Un
880 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
881 # --- > Par principe, M = Id, Q = 0
883 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
885 returnSerieAsArrayMatrix = True )
887 # Mean of forecast and observation of forecast
888 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
889 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
892 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
893 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
895 #--------------------------
896 if VariantM == "KalmanFilterFormula":
897 mS = RIdemi * EaHX / math.sqrt(__m-1)
898 delta = RIdemi * ( Ynpu - Hfm )
899 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
900 vw = mT @ mS.T @ delta
902 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
903 mU = numpy.identity(__m)
905 EaX = EaX / math.sqrt(__m-1)
906 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
907 #--------------------------
908 elif VariantM == "Variational":
909 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
911 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
912 _Jo = 0.5 * _A.T @ (RI * _A)
913 _Jb = 0.5 * (__m-1) * w.T @ w
916 def GradientOfCostFunction(w):
917 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
918 _GardJo = - EaHX.T @ (RI * _A)
919 _GradJb = (__m-1) * w.reshape((__m,1))
920 _GradJ = _GardJo + _GradJb
921 return numpy.ravel(_GradJ)
922 vw = scipy.optimize.fmin_cg(
924 x0 = numpy.zeros(__m),
925 fprime = GradientOfCostFunction,
930 Hto = EaHX.T @ (RI * EaHX)
931 Htb = (__m-1) * numpy.identity(__m)
934 Pta = numpy.linalg.inv( Hta )
935 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
937 Xn = Xfm + EaX @ (vw[:,None] + EWa)
938 #--------------------------
939 elif VariantM == "FiniteSize11": # Jauge Boc2011
940 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
942 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
943 _Jo = 0.5 * _A.T @ (RI * _A)
944 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
947 def GradientOfCostFunction(w):
948 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
949 _GardJo = - EaHX.T @ (RI * _A)
950 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
951 _GradJ = _GardJo + _GradJb
952 return numpy.ravel(_GradJ)
953 vw = scipy.optimize.fmin_cg(
955 x0 = numpy.zeros(__m),
956 fprime = GradientOfCostFunction,
961 Hto = EaHX.T @ (RI * EaHX)
963 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
964 / (1 + 1/__m + vw.T @ vw)**2
967 Pta = numpy.linalg.inv( Hta )
968 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
970 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
971 #--------------------------
972 elif VariantM == "FiniteSize15": # Jauge Boc2015
973 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
975 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
976 _Jo = 0.5 * _A.T * RI * _A
977 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
980 def GradientOfCostFunction(w):
981 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
982 _GardJo = - EaHX.T @ (RI * _A)
983 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
984 _GradJ = _GardJo + _GradJb
985 return numpy.ravel(_GradJ)
986 vw = scipy.optimize.fmin_cg(
988 x0 = numpy.zeros(__m),
989 fprime = GradientOfCostFunction,
994 Hto = EaHX.T @ (RI * EaHX)
996 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
997 / (1 + 1/__m + vw.T @ vw)**2
1000 Pta = numpy.linalg.inv( Hta )
1001 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1003 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1004 #--------------------------
1005 elif VariantM == "FiniteSize16": # Jauge Boc2016
1006 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1007 def CostFunction(w):
1008 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009 _Jo = 0.5 * _A.T @ (RI * _A)
1010 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1013 def GradientOfCostFunction(w):
1014 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1015 _GardJo = - EaHX.T @ (RI * _A)
1016 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1017 _GradJ = _GardJo + _GradJb
1018 return numpy.ravel(_GradJ)
1019 vw = scipy.optimize.fmin_cg(
1021 x0 = numpy.zeros(__m),
1022 fprime = GradientOfCostFunction,
1027 Hto = EaHX.T @ (RI * EaHX)
1028 Htb = ((__m+1) / (__m-1)) * \
1029 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1030 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1033 Pta = numpy.linalg.inv( Hta )
1034 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1036 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1037 #--------------------------
1039 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1041 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1042 Xn = CovarianceInflation( Xn,
1043 selfA._parameters["InflationType"],
1044 selfA._parameters["InflationFactor"],
1047 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1048 #--------------------------
1050 if selfA._parameters["StoreInternalVariables"] \
1051 or selfA._toStore("CostFunctionJ") \
1052 or selfA._toStore("CostFunctionJb") \
1053 or selfA._toStore("CostFunctionJo") \
1054 or selfA._toStore("APosterioriCovariance") \
1055 or selfA._toStore("InnovationAtCurrentAnalysis") \
1056 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1057 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1058 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1059 _Innovation = Ynpu - _HXa
1061 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1062 # ---> avec analysis
1063 selfA.StoredVariables["Analysis"].store( Xa )
1064 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1065 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1066 if selfA._toStore("InnovationAtCurrentAnalysis"):
1067 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1068 # ---> avec current state
1069 if selfA._parameters["StoreInternalVariables"] \
1070 or selfA._toStore("CurrentState"):
1071 selfA.StoredVariables["CurrentState"].store( Xn )
1072 if selfA._toStore("ForecastState"):
1073 selfA.StoredVariables["ForecastState"].store( EMX )
1074 if selfA._toStore("BMA"):
1075 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1076 if selfA._toStore("InnovationAtCurrentState"):
1077 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1078 if selfA._toStore("SimulatedObservationAtCurrentState") \
1079 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1080 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1082 if selfA._parameters["StoreInternalVariables"] \
1083 or selfA._toStore("CostFunctionJ") \
1084 or selfA._toStore("CostFunctionJb") \
1085 or selfA._toStore("CostFunctionJo") \
1086 or selfA._toStore("CurrentOptimum") \
1087 or selfA._toStore("APosterioriCovariance"):
1088 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1089 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1091 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1092 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1093 selfA.StoredVariables["CostFunctionJ" ].store( J )
1095 if selfA._toStore("IndexOfOptimum") \
1096 or selfA._toStore("CurrentOptimum") \
1097 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1098 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1099 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1100 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1101 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1102 if selfA._toStore("IndexOfOptimum"):
1103 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1104 if selfA._toStore("CurrentOptimum"):
1105 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1106 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1107 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1108 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1109 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1110 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1111 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1112 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1113 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1114 if selfA._toStore("APosterioriCovariance"):
1115 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1116 if selfA._parameters["EstimationOf"] == "Parameters" \
1117 and J < previousJMinimum:
1118 previousJMinimum = J
1120 if selfA._toStore("APosterioriCovariance"):
1121 covarianceXaMin = Pn
1122 # ---> Pour les smoothers
1123 if selfA._toStore("CurrentEnsembleState"):
1124 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    # Final additional storage of the optimum for parameter estimation
1127 # ----------------------------------------------------------------------
1128 if selfA._parameters["EstimationOf"] == "Parameters":
1129 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1130 selfA.StoredVariables["Analysis"].store( XaMin )
1131 if selfA._toStore("APosterioriCovariance"):
1132 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1133 if selfA._toStore("BMA"):
1134 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
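#
# Illustrative sketch (not part of the library): the core analysis update of
# the "KalmanFilterFormula" variant above, written for a plain diagonal R.
# All array names and sizes below are assumptions for the example only.
#
#   RIdemi = numpy.diag( 1. / numpy.sqrt(numpy.diag(R_example)) )   # R^{-1/2}
#   mS     = RIdemi @ EaHX / math.sqrt(m - 1)       # scaled observation anomalies
#   delta  = RIdemi @ ( Y_example - Hfm )           # scaled innovation
#   mT     = numpy.linalg.inv( numpy.identity(m) + mS.T @ mS )
#   vw     = mT @ mS.T @ delta                      # weights in ensemble space
#   Tdemi  = numpy.real( scipy.linalg.sqrtm(mT) )
#   Xa_members = Xfm + (EaX / math.sqrt(m - 1)) @ ( vw.reshape((m,1))
#                + math.sqrt(m - 1) * Tdemi )       # analysis ensemble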
1138 # ==============================================================================
1139 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1140 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1144 if selfA._parameters["EstimationOf"] == "Parameters":
1145 selfA._parameters["StoreInternalVariables"] = True
1149 H = HO["Direct"].appliedControledFormTo
1151 if selfA._parameters["EstimationOf"] == "State":
1152 M = EM["Direct"].appliedControledFormTo
1154 if CM is not None and "Tangent" in CM and U is not None:
1155 Cm = CM["Tangent"].asMatrix(Xb)
1159 # Nombre de pas identique au nombre de pas d'observations
1160 # -------------------------------------------------------
1161 if hasattr(Y,"stepnumber"):
1162 duration = Y.stepnumber()
1163 __p = numpy.cumprod(Y.shape())[-1]
1166 __p = numpy.array(Y).size
1168 # Précalcul des inversions de B et R
1169 # ----------------------------------
1170 if selfA._parameters["StoreInternalVariables"] \
1171 or selfA._toStore("CostFunctionJ") \
1172 or selfA._toStore("CostFunctionJb") \
1173 or selfA._toStore("CostFunctionJo") \
1174 or selfA._toStore("CurrentOptimum") \
1175 or selfA._toStore("APosterioriCovariance"):
1182 __m = selfA._parameters["NumberOfMembers"]
1183 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1185 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1187 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1189 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1191 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1192 selfA.StoredVariables["Analysis"].store( Xb )
1193 if selfA._toStore("APosterioriCovariance"):
1194 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1197 previousJMinimum = numpy.finfo(float).max
1199 for step in range(duration-1):
1200 if hasattr(Y,"store"):
1201 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1203 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1206 if hasattr(U,"store") and len(U)>1:
1207 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1208 elif hasattr(U,"store") and len(U)==1:
1209 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1211 Un = numpy.asmatrix(numpy.ravel( U )).T
1215 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1216 Xn = CovarianceInflation( Xn,
1217 selfA._parameters["InflationType"],
1218 selfA._parameters["InflationFactor"],
1221 #--------------------------
1222 if VariantM == "IEnKF12":
1223 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1224 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1228 Ta = numpy.identity(__m)
1229 vw = numpy.zeros(__m)
1230 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1231 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1234 E1 = vx1 + _epsilon * EaX
1236 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1238 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1239 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1241 returnSerieAsArrayMatrix = True )
1242 elif selfA._parameters["EstimationOf"] == "Parameters":
1243 # --- > Par principe, M = Id
1245 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1246 vy1 = H((vx2, Un)).reshape((__p,1))
1248 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1250 returnSerieAsArrayMatrix = True )
1251 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1254 EaY = (HE2 - vy2) / _epsilon
1256 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1258 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1259 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1260 Deltaw = - numpy.linalg.solve(mH,GradJ)
1265 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1269 A2 = EnsembleOfAnomalies( E2 )
1272 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1273 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1276 #--------------------------
1278 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1280 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1281 Xn = CovarianceInflation( Xn,
1282 selfA._parameters["InflationType"],
1283 selfA._parameters["InflationFactor"],
1286 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1287 #--------------------------
1289 if selfA._parameters["StoreInternalVariables"] \
1290 or selfA._toStore("CostFunctionJ") \
1291 or selfA._toStore("CostFunctionJb") \
1292 or selfA._toStore("CostFunctionJo") \
1293 or selfA._toStore("APosterioriCovariance") \
1294 or selfA._toStore("InnovationAtCurrentAnalysis") \
1295 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1296 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1297 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1298 _Innovation = Ynpu - _HXa
1300 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1301 # ---> avec analysis
1302 selfA.StoredVariables["Analysis"].store( Xa )
1303 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1304 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1305 if selfA._toStore("InnovationAtCurrentAnalysis"):
1306 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1307 # ---> avec current state
1308 if selfA._parameters["StoreInternalVariables"] \
1309 or selfA._toStore("CurrentState"):
1310 selfA.StoredVariables["CurrentState"].store( Xn )
1311 if selfA._toStore("ForecastState"):
1312 selfA.StoredVariables["ForecastState"].store( E2 )
1313 if selfA._toStore("BMA"):
1314 selfA.StoredVariables["BMA"].store( E2 - Xa )
1315 if selfA._toStore("InnovationAtCurrentState"):
1316 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1317 if selfA._toStore("SimulatedObservationAtCurrentState") \
1318 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1319 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1321 if selfA._parameters["StoreInternalVariables"] \
1322 or selfA._toStore("CostFunctionJ") \
1323 or selfA._toStore("CostFunctionJb") \
1324 or selfA._toStore("CostFunctionJo") \
1325 or selfA._toStore("CurrentOptimum") \
1326 or selfA._toStore("APosterioriCovariance"):
1327 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1328 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1330 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1331 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1332 selfA.StoredVariables["CostFunctionJ" ].store( J )
1334 if selfA._toStore("IndexOfOptimum") \
1335 or selfA._toStore("CurrentOptimum") \
1336 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1337 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1338 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1339 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1340 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1341 if selfA._toStore("IndexOfOptimum"):
1342 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1343 if selfA._toStore("CurrentOptimum"):
1344 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1345 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1346 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1347 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1348 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1349 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1350 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1351 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1352 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1353 if selfA._toStore("APosterioriCovariance"):
1354 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1355 if selfA._parameters["EstimationOf"] == "Parameters" \
1356 and J < previousJMinimum:
1357 previousJMinimum = J
1359 if selfA._toStore("APosterioriCovariance"):
1360 covarianceXaMin = Pn
    # Final additional storage of the optimum for parameter estimation
1363 # ----------------------------------------------------------------------
1364 if selfA._parameters["EstimationOf"] == "Parameters":
1365 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1366 selfA.StoredVariables["Analysis"].store( XaMin )
1367 if selfA._toStore("APosterioriCovariance"):
1368 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1369 if selfA._toStore("BMA"):
1370 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1374 # ==============================================================================
1375 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    # Non-linear operator for the outer loop
1384 Hm = HO["Direct"].appliedTo
    # Precompute the inversions of B and R
    # Starting point of the optimization
1391 Xini = selfA._parameters["InitializationPoint"]
1393 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1394 Innovation = Y - HXb
1401 Xr = Xini.reshape((-1,1))
1402 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1406 Ht = HO["Tangent"].asMatrix(Xr)
1407 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        # Definition of the cost function
1410 # ------------------------------
1411 def CostFunction(dx):
1412 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1413 if selfA._parameters["StoreInternalVariables"] or \
1414 selfA._toStore("CurrentState") or \
1415 selfA._toStore("CurrentOptimum"):
1416 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1418 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1419 _dInnovation = Innovation - _HdX
1420 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1421 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1422 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1423 if selfA._toStore("InnovationAtCurrentState"):
1424 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1426 Jb = float( 0.5 * _dX.T * BI * _dX )
1427 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1430 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1431 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1432 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1433 selfA.StoredVariables["CostFunctionJ" ].store( J )
1434 if selfA._toStore("IndexOfOptimum") or \
1435 selfA._toStore("CurrentOptimum") or \
1436 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1437 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1438 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1439 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1440 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1441 if selfA._toStore("IndexOfOptimum"):
1442 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1443 if selfA._toStore("CurrentOptimum"):
1444 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1445 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1446 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1447 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1448 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1449 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1450 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1451 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1452 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1455 def GradientOfCostFunction(dx):
1456 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1458 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1459 _dInnovation = Innovation - _HdX
1461 GradJo = - Ht.T @ (RI * _dInnovation)
1462 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        # Minimization of the cost functional
1466 # --------------------------------
1467 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1469 if selfA._parameters["Minimizer"] == "LBFGSB":
1470 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1471 if "0.19" <= scipy.version.version <= "1.1.0":
1472 import lbfgsbhlt as optimiseur
1474 import scipy.optimize as optimiseur
1475 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1476 func = CostFunction,
1477 x0 = numpy.zeros(Xini.size),
1478 fprime = GradientOfCostFunction,
1480 bounds = selfA._parameters["Bounds"],
1481 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1482 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1483 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1484 iprint = selfA._parameters["optiprint"],
1486 nfeval = Informations['funcalls']
1487 rc = Informations['warnflag']
1488 elif selfA._parameters["Minimizer"] == "TNC":
1489 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1490 func = CostFunction,
1491 x0 = numpy.zeros(Xini.size),
1492 fprime = GradientOfCostFunction,
1494 bounds = selfA._parameters["Bounds"],
1495 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1496 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1497 ftol = selfA._parameters["CostDecrementTolerance"],
1498 messages = selfA._parameters["optmessages"],
1500 elif selfA._parameters["Minimizer"] == "CG":
1501 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1503 x0 = numpy.zeros(Xini.size),
1504 fprime = GradientOfCostFunction,
1506 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1507 gtol = selfA._parameters["GradientNormTolerance"],
1508 disp = selfA._parameters["optdisp"],
1511 elif selfA._parameters["Minimizer"] == "NCG":
1512 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1514 x0 = numpy.zeros(Xini.size),
1515 fprime = GradientOfCostFunction,
1517 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1518 avextol = selfA._parameters["CostDecrementTolerance"],
1519 disp = selfA._parameters["optdisp"],
1522 elif selfA._parameters["Minimizer"] == "BFGS":
1523 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1525 x0 = numpy.zeros(Xini.size),
1526 fprime = GradientOfCostFunction,
1528 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1529 gtol = selfA._parameters["GradientNormTolerance"],
1530 disp = selfA._parameters["optdisp"],
1534 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1536 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1537 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1539 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1540 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1541 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1543 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1546 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1547 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1549 # Retrieval of the analysis
1550 # ----------------------
1553 selfA.StoredVariables["Analysis"].store( Xa )
1555 if selfA._toStore("OMA") or \
1556 selfA._toStore("SigmaObs2") or \
1557 selfA._toStore("SimulationQuantiles") or \
1558 selfA._toStore("SimulatedObservationAtOptimum"):
1559 if selfA._toStore("SimulatedObservationAtCurrentState"):
1560 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1561 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1562 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1566 # Computation of the analysis covariance
1567 # ---------------------------------
1568 if selfA._toStore("APosterioriCovariance") or \
1569 selfA._toStore("SimulationQuantiles") or \
1570 selfA._toStore("JacobianMatrixAtOptimum") or \
1571 selfA._toStore("KalmanGainAtOptimum"):
1572 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1573 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1574 if selfA._toStore("APosterioriCovariance") or \
1575 selfA._toStore("SimulationQuantiles") or \
1576 selfA._toStore("KalmanGainAtOptimum"):
1577 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1578 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1579 if selfA._toStore("APosterioriCovariance") or \
1580 selfA._toStore("SimulationQuantiles"):
1584 _ee = numpy.matrix(numpy.zeros(nb)).T
1586 _HtEE = numpy.dot(HtM,_ee)
1587 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1588 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1589 HessienneI = numpy.matrix( HessienneI )
1590 A = HessienneI.I
1591 if min(A.shape) != max(A.shape):
1592 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1593 if (numpy.diag(A) < 0).any():
1594 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1595 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1597 L = numpy.linalg.cholesky( A )
1599 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1600 if selfA._toStore("APosterioriCovariance"):
1601 selfA.StoredVariables["APosterioriCovariance"].store( A )
1602 if selfA._toStore("JacobianMatrixAtOptimum"):
1603 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1604 if selfA._toStore("KalmanGainAtOptimum"):
1605 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1606 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1607 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1609 # Additional computations and/or storage
1610 # ---------------------------------------
1611 if selfA._toStore("Innovation") or \
1612 selfA._toStore("SigmaObs2") or \
1613 selfA._toStore("MahalanobisConsistency") or \
1614 selfA._toStore("OMB"):
1616 if selfA._toStore("Innovation"):
1617 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1618 if selfA._toStore("BMA"):
1619 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1620 if selfA._toStore("OMA"):
1621 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1622 if selfA._toStore("OMB"):
1623 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1624 if selfA._toStore("SigmaObs2"):
1625 TraceR = R.trace(Y.size)
1626 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1627 if selfA._toStore("MahalanobisConsistency"):
1628 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1629 if selfA._toStore("SimulationQuantiles"):
1630 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1631 HXa = numpy.matrix(numpy.ravel( HXa )).T
1634 for i in range(nech):
1635 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1636 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1637 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1639 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
1640 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1641 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1642 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1645 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
1647 YfQ = numpy.hstack((YfQ,Yr))
1648 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
1649 YfQ.sort(axis=-1)
1650 YQ = None
1651 for quantile in selfA._parameters["Quantiles"]:
1652 if not (0. <= float(quantile) <= 1.): continue
1653 indice = int(nech * float(quantile) - 1./nech)
1654 if YQ is None: YQ = YfQ[:,indice]
1655 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1656 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1657 if selfA._toStore("SampledStateForQuantiles"):
1658 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
1659 if selfA._toStore("SimulatedObservationAtBackground"):
1660 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1661 if selfA._toStore("SimulatedObservationAtOptimum"):
1662 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1666 # ==============================================================================
1667 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1668 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1670 Maximum Likelihood Ensemble Filter
1672 if selfA._parameters["EstimationOf"] == "Parameters":
1673 selfA._parameters["StoreInternalVariables"] = True
1677 H = HO["Direct"].appliedControledFormTo
1679 if selfA._parameters["EstimationOf"] == "State":
1680 M = EM["Direct"].appliedControledFormTo
1682 if CM is not None and "Tangent" in CM and U is not None:
1683 Cm = CM["Tangent"].asMatrix(Xb)
1687 # Number of steps identical to the number of observation steps
1688 # -------------------------------------------------------
1689 if hasattr(Y,"stepnumber"):
1690 duration = Y.stepnumber()
1691 __p = numpy.cumprod(Y.shape())[-1]
1694 __p = numpy.array(Y).size
1696 # Precomputation of the inverses of B and R
1697 # ----------------------------------
1698 if selfA._parameters["StoreInternalVariables"] \
1699 or selfA._toStore("CostFunctionJ") \
1700 or selfA._toStore("CostFunctionJb") \
1701 or selfA._toStore("CostFunctionJo") \
1702 or selfA._toStore("CurrentOptimum") \
1703 or selfA._toStore("APosterioriCovariance"):
1710 __m = selfA._parameters["NumberOfMembers"]
1711 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1713 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1715 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1717 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1718 selfA.StoredVariables["Analysis"].store( Xb )
1719 if selfA._toStore("APosterioriCovariance"):
1720 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1723 previousJMinimum = numpy.finfo(float).max
1725 for step in range(duration-1):
1726 if hasattr(Y,"store"):
1727 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1729 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1732 if hasattr(U,"store") and len(U)>1:
1733 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1734 elif hasattr(U,"store") and len(U)==1:
1735 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1737 Un = numpy.asmatrix(numpy.ravel( U )).T
1741 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1742 Xn = CovarianceInflation( Xn,
1743 selfA._parameters["InflationType"],
1744 selfA._parameters["InflationFactor"],
1747 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1748 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1750 returnSerieAsArrayMatrix = True )
1751 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1752 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
1753 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1754 Xn_predicted = Xn_predicted + Cm * Un
1755 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1756 # ---> By principle, M = Id, Q = 0
1757 Xn_predicted = Xn
1759 #--------------------------
1760 if VariantM == "MLEF13":
1761 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1762 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1763 Ua = numpy.identity(__m)
1767 Ta = numpy.identity(__m)
1768 vw = numpy.zeros(__m)
1769 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1770 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1772 if BnotT:
1773 E1 = vx1 + _epsilon * EaX
1774 else:
1775 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1777 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1779 returnSerieAsArrayMatrix = True )
1780 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1782 if BnotT:
1783 EaY = (HE2 - vy2) / _epsilon
1784 else:
1785 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1787 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1788 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1789 Deltaw = - numpy.linalg.solve(mH,GradJ)
1794 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1799 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1801 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
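# Note (informal): at this point vw holds the converged analysis weights in
# ensemble space, vx1 = Xfm + EaX @ vw is the analysis mean, and the analysis
# ensemble is rebuilt around it through the square-root transform Ta (Ua is
# the identity matrix here).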
1802 #--------------------------
1804 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1806 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1807 Xn = CovarianceInflation( Xn,
1808 selfA._parameters["InflationType"],
1809 selfA._parameters["InflationFactor"],
1812 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1813 #--------------------------
1815 if selfA._parameters["StoreInternalVariables"] \
1816 or selfA._toStore("CostFunctionJ") \
1817 or selfA._toStore("CostFunctionJb") \
1818 or selfA._toStore("CostFunctionJo") \
1819 or selfA._toStore("APosterioriCovariance") \
1820 or selfA._toStore("InnovationAtCurrentAnalysis") \
1821 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1822 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1823 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1824 _Innovation = Ynpu - _HXa
1826 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1827 # ---> with analysis
1828 selfA.StoredVariables["Analysis"].store( Xa )
1829 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1830 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1831 if selfA._toStore("InnovationAtCurrentAnalysis"):
1832 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1833 # ---> with current state
1834 if selfA._parameters["StoreInternalVariables"] \
1835 or selfA._toStore("CurrentState"):
1836 selfA.StoredVariables["CurrentState"].store( Xn )
1837 if selfA._toStore("ForecastState"):
1838 selfA.StoredVariables["ForecastState"].store( EMX )
1839 if selfA._toStore("BMA"):
1840 selfA.StoredVariables["BMA"].store( EMX - Xa )
1841 if selfA._toStore("InnovationAtCurrentState"):
1842 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1843 if selfA._toStore("SimulatedObservationAtCurrentState") \
1844 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1845 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1847 if selfA._parameters["StoreInternalVariables"] \
1848 or selfA._toStore("CostFunctionJ") \
1849 or selfA._toStore("CostFunctionJb") \
1850 or selfA._toStore("CostFunctionJo") \
1851 or selfA._toStore("CurrentOptimum") \
1852 or selfA._toStore("APosterioriCovariance"):
1853 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1854 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1855 J = Jb + Jo
1856 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1857 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1858 selfA.StoredVariables["CostFunctionJ" ].store( J )
1860 if selfA._toStore("IndexOfOptimum") \
1861 or selfA._toStore("CurrentOptimum") \
1862 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1863 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1864 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1865 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1866 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1867 if selfA._toStore("IndexOfOptimum"):
1868 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1869 if selfA._toStore("CurrentOptimum"):
1870 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1871 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1872 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1873 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1874 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1875 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1876 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1877 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1878 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1879 if selfA._toStore("APosterioriCovariance"):
1880 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1881 if selfA._parameters["EstimationOf"] == "Parameters" \
1882 and J < previousJMinimum:
1883 previousJMinimum = J
1885 if selfA._toStore("APosterioriCovariance"):
1886 covarianceXaMin = Pn
1888 # Additional final storage of the optimum in parameter estimation
1889 # ----------------------------------------------------------------------
1890 if selfA._parameters["EstimationOf"] == "Parameters":
1891 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1892 selfA.StoredVariables["Analysis"].store( XaMin )
1893 if selfA._toStore("APosterioriCovariance"):
1894 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1895 if selfA._toStore("BMA"):
1896 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1900 # ==============================================================================
1912 Computer implementation of the MMQR algorithm, based on the publication:
1913 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1914 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1917 # Retrieval of the initial data and information
1918 # --------------------------------------------------
1919 variables = numpy.ravel( x0 )
1920 mesures = numpy.ravel( y )
1921 increment = sys.float_info[0]
1922 p = variables.size
1923 n = mesures.size
1924 quantile = float(quantile)
1926 # Computation of the MM parameters
1927 # ---------------------------
1928 tn = float(toler) / n
1929 e0 = -tn / math.log(tn)
1930 epsilon = (e0-tn)/(1+math.log(e0))
1932 # Initialization computations
1933 # ------------------------
1934 residus = mesures - numpy.ravel( func( variables ) )
1935 poids = 1./(epsilon+numpy.abs(residus))
1936 veps = 1. - 2. * quantile - residus * poids
1937 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1938 iteration = 0
1940 # Iterative search
1941 # -------------------
1942 while (increment > toler) and (iteration < maxfun) :
1943 iteration += 1
1945 Derivees = numpy.array(fprime(variables))
1946 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
1947 DeriveesT = Derivees.transpose()
1948 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1949 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
1950 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1952 variables = variables + step
1953 if bounds is not None:
1954 # Warning: avoid an infinite loop if an interval is too small
1955 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1956 step = step/2.
1957 variables = variables - step
1958 residus = mesures - numpy.ravel( func(variables) )
1959 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1961 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1962 step = step/2.
1963 variables = variables - step
1964 residus = mesures - numpy.ravel( func(variables) )
1965 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1967 increment = lastsurrogate-surrogate
1968 poids = 1./(epsilon+numpy.abs(residus))
1969 veps = 1. - 2. * quantile - residus * poids
1970 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1974 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1976 return variables, Ecart, [n,p,iteration,increment,0]
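#
# Illustrative sketch (not used by mmqr above, added for clarity): the MMQR
# iteration minimizes the quantile ("pinball") loss of Hunter & Lange (2000)
# through successive smoothed surrogates. A minimal standalone version of
# that loss, assuming a scalar quantile level q in [0,1], could read:
#
#     import numpy
#     def pinball_loss(residuals, q=0.5):
#         r = numpy.ravel(residuals)
#         # rho_q(r) = (q-1)*r for r < 0 and q*r for r >= 0
#         return numpy.sum(numpy.where(r < 0., (q - 1.) * r, q * r))
#
# The "poids = 1/(epsilon+|residus|)" weights above implement the
# epsilon-smoothed majorizer of |r| used at each MM step.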
1978 # ==============================================================================
1979 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1981 Multi-step and multi-method 3DVAR
1986 Xn = numpy.ravel(Xb).reshape((-1,1))
1988 if selfA._parameters["EstimationOf"] == "State":
1989 M = EM["Direct"].appliedTo
1991 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1992 selfA.StoredVariables["Analysis"].store( Xn )
1993 if selfA._toStore("APosterioriCovariance"):
1994 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1996 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1997 if selfA._toStore("ForecastState"):
1998 selfA.StoredVariables["ForecastState"].store( Xn )
2000 if hasattr(Y,"stepnumber"):
2001 duration = Y.stepnumber()
2007 for step in range(duration-1):
2008 if hasattr(Y,"store"):
2009 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2011 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2013 if selfA._parameters["EstimationOf"] == "State": # Forecast
2014 Xn = selfA.StoredVariables["Analysis"][-1]
2015 Xn_predicted = M( Xn )
2016 if selfA._toStore("ForecastState"):
2017 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2018 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2019 # ---> By principle, M = Id, Q = 0
2020 Xn_predicted = Xn
2021 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2023 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
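#
# Informal usage sketch (assuming "oneCycle" is one of the single-cycle 3DVAR
# drivers of this module, for instance std3dvar or psas3dvar):
#
#     multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle=std3dvar)
#
# At each observation step the previous analysis is propagated by the
# evolution model (for "State" estimation) and a single 3DVAR cycle is run
# against the new observation Ynpu.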
2027 # ==============================================================================
2028 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2037 Hm = HO["Direct"].appliedTo
2039 # Possible use of a precomputed H(Xb) vector
2040 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2041 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2044 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2045 if Y.size != HXb.size:
2046 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2047 if max(Y.shape) != max(HXb.shape):
2048 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2050 if selfA._toStore("JacobianMatrixAtBackground"):
2051 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2052 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2053 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2055 Ht = HO["Tangent"].asMatrix(Xb)
2057 HBHTpR = R + Ht * BHT
2058 Innovation = Y - HXb
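# PSAS note (informal): the minimisation below is carried out in observation
# space on a dual variable w, with
#     J(w) = 0.5 * w.T * (H B H.T + R) * w - w.T * (Y - H(Xb))
# and the physical analysis is recovered afterwards as Xa = Xb + B H.T w,
# which is what "Minimum = Xb + BHT * ..." implements further down.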
2060 # Starting point of the optimization
2061 Xini = numpy.zeros(Xb.shape)
2063 # Definition of the cost function
2064 # ------------------------------
2065 def CostFunction(w):
2066 _W = numpy.asmatrix(numpy.ravel( w )).T
2067 if selfA._parameters["StoreInternalVariables"] or \
2068 selfA._toStore("CurrentState") or \
2069 selfA._toStore("CurrentOptimum"):
2070 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2071 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2072 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2073 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2074 if selfA._toStore("InnovationAtCurrentState"):
2075 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2077 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2078 Jo = float( - _W.T * Innovation )
2079 J = Jb + Jo
2081 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2082 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2083 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2084 selfA.StoredVariables["CostFunctionJ" ].store( J )
2085 if selfA._toStore("IndexOfOptimum") or \
2086 selfA._toStore("CurrentOptimum") or \
2087 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2088 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2089 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2090 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2091 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2092 if selfA._toStore("IndexOfOptimum"):
2093 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2094 if selfA._toStore("CurrentOptimum"):
2095 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2096 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2097 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2098 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2099 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2100 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2101 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2102 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2103 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2104 return J
2106 def GradientOfCostFunction(w):
2107 _W = numpy.asmatrix(numpy.ravel( w )).T
2108 GradJb = HBHTpR * _W
2109 GradJo = - Innovation
2110 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2111 return GradJ
2113 # Minimization of the cost functional
2114 # --------------------------------
2115 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2117 if selfA._parameters["Minimizer"] == "LBFGSB":
2118 if "0.19" <= scipy.version.version <= "1.1.0":
2119 import lbfgsbhlt as optimiseur
2121 import scipy.optimize as optimiseur
2122 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2123 func = CostFunction,
2125 fprime = GradientOfCostFunction,
2127 bounds = selfA._parameters["Bounds"],
2128 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2129 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2130 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2131 iprint = selfA._parameters["optiprint"],
2133 nfeval = Informations['funcalls']
2134 rc = Informations['warnflag']
2135 elif selfA._parameters["Minimizer"] == "TNC":
2136 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2137 func = CostFunction,
2139 fprime = GradientOfCostFunction,
2141 bounds = selfA._parameters["Bounds"],
2142 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2143 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2144 ftol = selfA._parameters["CostDecrementTolerance"],
2145 messages = selfA._parameters["optmessages"],
2147 elif selfA._parameters["Minimizer"] == "CG":
2148 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2151 fprime = GradientOfCostFunction,
2153 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2154 gtol = selfA._parameters["GradientNormTolerance"],
2155 disp = selfA._parameters["optdisp"],
2158 elif selfA._parameters["Minimizer"] == "NCG":
2159 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2162 fprime = GradientOfCostFunction,
2164 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2165 avextol = selfA._parameters["CostDecrementTolerance"],
2166 disp = selfA._parameters["optdisp"],
2169 elif selfA._parameters["Minimizer"] == "BFGS":
2170 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2173 fprime = GradientOfCostFunction,
2175 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2176 gtol = selfA._parameters["GradientNormTolerance"],
2177 disp = selfA._parameters["optdisp"],
2181 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2183 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2184 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2186 # Correction to work around a TNC bug on the returned Minimum
2187 # ----------------------------------------------------------------
2188 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2189 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2190 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2191 else:
2192 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2194 # Retrieval of the analysis
2195 # ----------------------
2198 selfA.StoredVariables["Analysis"].store( Xa )
2200 if selfA._toStore("OMA") or \
2201 selfA._toStore("SigmaObs2") or \
2202 selfA._toStore("SimulationQuantiles") or \
2203 selfA._toStore("SimulatedObservationAtOptimum"):
2204 if selfA._toStore("SimulatedObservationAtCurrentState"):
2205 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2206 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2207 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2211 # Computation of the analysis covariance
2212 # ---------------------------------
2213 if selfA._toStore("APosterioriCovariance") or \
2214 selfA._toStore("SimulationQuantiles") or \
2215 selfA._toStore("JacobianMatrixAtOptimum") or \
2216 selfA._toStore("KalmanGainAtOptimum"):
2217 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2218 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2219 if selfA._toStore("APosterioriCovariance") or \
2220 selfA._toStore("SimulationQuantiles") or \
2221 selfA._toStore("KalmanGainAtOptimum"):
2222 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2223 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2224 if selfA._toStore("APosterioriCovariance") or \
2225 selfA._toStore("SimulationQuantiles"):
2231 _ee = numpy.matrix(numpy.zeros(nb)).T
2233 _HtEE = numpy.dot(HtM,_ee)
2234 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2235 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2236 HessienneI = numpy.matrix( HessienneI )
2237 A = HessienneI.I
2238 if min(A.shape) != max(A.shape):
2239 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2240 if (numpy.diag(A) < 0).any():
2241 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2242 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2244 L = numpy.linalg.cholesky( A )
2246 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2247 if selfA._toStore("APosterioriCovariance"):
2248 selfA.StoredVariables["APosterioriCovariance"].store( A )
2249 if selfA._toStore("JacobianMatrixAtOptimum"):
2250 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2251 if selfA._toStore("KalmanGainAtOptimum"):
2252 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2253 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2254 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2256 # Additional computations and/or storage
2257 # ---------------------------------------
2258 if selfA._toStore("Innovation") or \
2259 selfA._toStore("SigmaObs2") or \
2260 selfA._toStore("MahalanobisConsistency") or \
2261 selfA._toStore("OMB"):
2263 if selfA._toStore("Innovation"):
2264 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2265 if selfA._toStore("BMA"):
2266 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2267 if selfA._toStore("OMA"):
2268 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2269 if selfA._toStore("OMB"):
2270 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2271 if selfA._toStore("SigmaObs2"):
2272 TraceR = R.trace(Y.size)
2273 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2274 if selfA._toStore("MahalanobisConsistency"):
2275 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2276 if selfA._toStore("SimulationQuantiles"):
2277 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2278 HXa = numpy.matrix(numpy.ravel( HXa )).T
2281 for i in range(nech):
2282 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2283 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2284 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2286 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2287 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2288 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2289 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2292 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2294 YfQ = numpy.hstack((YfQ,Yr))
2295 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2296 YfQ.sort(axis=-1)
2297 YQ = None
2298 for quantile in selfA._parameters["Quantiles"]:
2299 if not (0. <= float(quantile) <= 1.): continue
2300 indice = int(nech * float(quantile) - 1./nech)
2301 if YQ is None: YQ = YfQ[:,indice]
2302 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2303 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2304 if selfA._toStore("SampledStateForQuantiles"):
2305 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2306 if selfA._toStore("SimulatedObservationAtBackground"):
2307 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2308 if selfA._toStore("SimulatedObservationAtOptimum"):
2309 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2313 # ==============================================================================
2314 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2318 if selfA._parameters["EstimationOf"] == "Parameters":
2319 selfA._parameters["StoreInternalVariables"] = True
2322 H = HO["Direct"].appliedControledFormTo
2324 if selfA._parameters["EstimationOf"] == "State":
2325 M = EM["Direct"].appliedControledFormTo
2327 if CM is not None and "Tangent" in CM and U is not None:
2328 Cm = CM["Tangent"].asMatrix(Xb)
2332 # Observation duration and sizes
2333 if hasattr(Y,"stepnumber"):
2334 duration = Y.stepnumber()
2335 __p = numpy.cumprod(Y.shape())[-1]
2338 __p = numpy.array(Y).size
2340 # Precomputation of the inverses of B and R
2341 if selfA._parameters["StoreInternalVariables"] \
2342 or selfA._toStore("CostFunctionJ") \
2343 or selfA._toStore("CostFunctionJb") \
2344 or selfA._toStore("CostFunctionJo") \
2345 or selfA._toStore("CurrentOptimum") \
2346 or selfA._toStore("APosterioriCovariance"):
2351 __m = selfA._parameters["NumberOfMembers"]
2353 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2355 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2357 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2359 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2360 selfA.StoredVariables["Analysis"].store( Xb )
2361 if selfA._toStore("APosterioriCovariance"):
2362 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2365 previousJMinimum = numpy.finfo(float).max
2367 for step in range(duration-1):
2368 if hasattr(Y,"store"):
2369 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2371 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2374 if hasattr(U,"store") and len(U)>1:
2375 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2376 elif hasattr(U,"store") and len(U)==1:
2377 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2379 Un = numpy.asmatrix(numpy.ravel( U )).T
2383 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2384 Xn = CovarianceInflation( Xn,
2385 selfA._parameters["InflationType"],
2386 selfA._parameters["InflationFactor"],
2389 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2390 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2392 returnSerieAsArrayMatrix = True )
2393 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2394 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2396 returnSerieAsArrayMatrix = True )
2397 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
2398 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2399 Xn_predicted = Xn_predicted + Cm * Un
2400 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2401 # ---> By principle, M = Id, Q = 0
2402 Xn_predicted = Xn
2403 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2405 returnSerieAsArrayMatrix = True )
2407 # Mean of forecast and observation of forecast
2408 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2409 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2411 #--------------------------
2412 if VariantM == "KalmanFilterFormula05":
2413 PfHT, HPfHT = 0., 0.
2414 for i in range(__m):
2415 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2416 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2417 PfHT += Exfi * Eyfi.T
2418 HPfHT += Eyfi * Eyfi.T
2419 PfHT = (1./(__m-1)) * PfHT
2420 HPfHT = (1./(__m-1)) * HPfHT
2421 Kn = PfHT * ( R + HPfHT ).I
2424 for i in range(__m):
2425 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2426 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
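# Note (informal): the loop above is the stochastic EnKF update with perturbed
# observations, applying the ensemble-estimated gain
#     Kn = Pf H.T * (H Pf H.T + R)^(-1)
# where Pf H.T and H Pf H.T are the sample covariances accumulated in PfHT
# and HPfHT.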
2427 #--------------------------
2428 elif VariantM == "KalmanFilterFormula16":
2429 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2430 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2432 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2433 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2435 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2437 for i in range(__m):
2438 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2439 #--------------------------
2441 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2443 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2444 Xn = CovarianceInflation( Xn,
2445 selfA._parameters["InflationType"],
2446 selfA._parameters["InflationFactor"],
2449 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2450 #--------------------------
2452 if selfA._parameters["StoreInternalVariables"] \
2453 or selfA._toStore("CostFunctionJ") \
2454 or selfA._toStore("CostFunctionJb") \
2455 or selfA._toStore("CostFunctionJo") \
2456 or selfA._toStore("APosterioriCovariance") \
2457 or selfA._toStore("InnovationAtCurrentAnalysis") \
2458 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2459 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2460 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2461 _Innovation = Ynpu - _HXa
2463 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2464 # ---> with analysis
2465 selfA.StoredVariables["Analysis"].store( Xa )
2466 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2467 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2468 if selfA._toStore("InnovationAtCurrentAnalysis"):
2469 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2470 # ---> with current state
2471 if selfA._parameters["StoreInternalVariables"] \
2472 or selfA._toStore("CurrentState"):
2473 selfA.StoredVariables["CurrentState"].store( Xn )
2474 if selfA._toStore("ForecastState"):
2475 selfA.StoredVariables["ForecastState"].store( EMX )
2476 if selfA._toStore("BMA"):
2477 selfA.StoredVariables["BMA"].store( EMX - Xa )
2478 if selfA._toStore("InnovationAtCurrentState"):
2479 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2480 if selfA._toStore("SimulatedObservationAtCurrentState") \
2481 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2482 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2484 if selfA._parameters["StoreInternalVariables"] \
2485 or selfA._toStore("CostFunctionJ") \
2486 or selfA._toStore("CostFunctionJb") \
2487 or selfA._toStore("CostFunctionJo") \
2488 or selfA._toStore("CurrentOptimum") \
2489 or selfA._toStore("APosterioriCovariance"):
2490 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2491 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2492 J = Jb + Jo
2493 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2494 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2495 selfA.StoredVariables["CostFunctionJ" ].store( J )
2497 if selfA._toStore("IndexOfOptimum") \
2498 or selfA._toStore("CurrentOptimum") \
2499 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2500 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2501 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2502 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2503 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2504 if selfA._toStore("IndexOfOptimum"):
2505 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2506 if selfA._toStore("CurrentOptimum"):
2507 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2508 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2509 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2510 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2511 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2512 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2513 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2514 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2515 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2516 if selfA._toStore("APosterioriCovariance"):
2517 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2518 if selfA._parameters["EstimationOf"] == "Parameters" \
2519 and J < previousJMinimum:
2520 previousJMinimum = J
2522 if selfA._toStore("APosterioriCovariance"):
2523 covarianceXaMin = Pn
2525 # Additional final storage of the optimum in parameter estimation
2526 # ----------------------------------------------------------------------
2527 if selfA._parameters["EstimationOf"] == "Parameters":
2528 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2529 selfA.StoredVariables["Analysis"].store( XaMin )
2530 if selfA._toStore("APosterioriCovariance"):
2531 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2532 if selfA._toStore("BMA"):
2533 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2537 # ==============================================================================
2538 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2547 Hm = HO["Direct"].appliedTo
2548 Ha = HO["Adjoint"].appliedInXTo
2550 # Possible use of a precomputed H(Xb) vector
2551 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2552 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2555 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2556 if Y.size != HXb.size:
2557 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2558 if max(Y.shape) != max(HXb.shape):
2559 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2561 if selfA._toStore("JacobianMatrixAtBackground"):
2562 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2563 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2564 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2566 # Precomputation of the inverses of B and R
2567 BI = B.getI()
2568 RI = R.getI()
2570 # Starting point of the optimization
2571 Xini = selfA._parameters["InitializationPoint"]
2573 # Definition of the cost function
2574 # ------------------------------
2575 def CostFunction(x):
2576 _X = numpy.asmatrix(numpy.ravel( x )).T
2577 if selfA._parameters["StoreInternalVariables"] or \
2578 selfA._toStore("CurrentState") or \
2579 selfA._toStore("CurrentOptimum"):
2580 selfA.StoredVariables["CurrentState"].store( _X )
2581 _HX = Hm( _X )
2582 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2583 _Innovation = Y - _HX
2584 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2585 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2586 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2587 if selfA._toStore("InnovationAtCurrentState"):
2588 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2590 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2591 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2592 J = Jb + Jo
2594 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2595 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2596 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2597 selfA.StoredVariables["CostFunctionJ" ].store( J )
2598 if selfA._toStore("IndexOfOptimum") or \
2599 selfA._toStore("CurrentOptimum") or \
2600 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2601 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2602 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2603 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2604 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2605 if selfA._toStore("IndexOfOptimum"):
2606 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2607 if selfA._toStore("CurrentOptimum"):
2608 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2609 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2610 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2611 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2612 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2613 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2614 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2615 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2616 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2617 return J
2619 def GradientOfCostFunction(x):
2620 _X = numpy.asmatrix(numpy.ravel( x )).T
2621 _HX = Hm( _X )
2622 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2623 GradJb = BI * (_X - Xb)
2624 GradJo = - Ha( (_X, RI * (Y - _HX)) )
2625 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2626 return GradJ
2628 # Minimization of the cost functional
2629 # --------------------------------
2630 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2632 if selfA._parameters["Minimizer"] == "LBFGSB":
2633 if "0.19" <= scipy.version.version <= "1.1.0":
2634 import lbfgsbhlt as optimiseur
2636 import scipy.optimize as optimiseur
2637 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2638 func = CostFunction,
2640 fprime = GradientOfCostFunction,
2642 bounds = selfA._parameters["Bounds"],
2643 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2644 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2645 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2646 iprint = selfA._parameters["optiprint"],
2648 nfeval = Informations['funcalls']
2649 rc = Informations['warnflag']
2650 elif selfA._parameters["Minimizer"] == "TNC":
2651 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2652 func = CostFunction,
2654 fprime = GradientOfCostFunction,
2656 bounds = selfA._parameters["Bounds"],
2657 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2658 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2659 ftol = selfA._parameters["CostDecrementTolerance"],
2660 messages = selfA._parameters["optmessages"],
2662 elif selfA._parameters["Minimizer"] == "CG":
2663 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2666 fprime = GradientOfCostFunction,
2668 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2669 gtol = selfA._parameters["GradientNormTolerance"],
2670 disp = selfA._parameters["optdisp"],
2673 elif selfA._parameters["Minimizer"] == "NCG":
2674 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2677 fprime = GradientOfCostFunction,
2679 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2680 avextol = selfA._parameters["CostDecrementTolerance"],
2681 disp = selfA._parameters["optdisp"],
2684 elif selfA._parameters["Minimizer"] == "BFGS":
2685 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2688 fprime = GradientOfCostFunction,
2690 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2691 gtol = selfA._parameters["GradientNormTolerance"],
2692 disp = selfA._parameters["optdisp"],
2696 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2698 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2699 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2701 # Correction to work around a TNC bug on the returned Minimum
2702 # ----------------------------------------------------------------
2703 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2704 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2706 # Retrieval of the analysis
2707 # ----------------------
2708 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2710 selfA.StoredVariables["Analysis"].store( Xa )
2712 if selfA._toStore("OMA") or \
2713 selfA._toStore("SigmaObs2") or \
2714 selfA._toStore("SimulationQuantiles") or \
2715 selfA._toStore("SimulatedObservationAtOptimum"):
2716 if selfA._toStore("SimulatedObservationAtCurrentState"):
2717 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2718 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2719 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2723 # Computation of the analysis covariance
2724 # ---------------------------------
2725 if selfA._toStore("APosterioriCovariance") or \
2726 selfA._toStore("SimulationQuantiles") or \
2727 selfA._toStore("JacobianMatrixAtOptimum") or \
2728 selfA._toStore("KalmanGainAtOptimum"):
2729 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2730 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2731 if selfA._toStore("APosterioriCovariance") or \
2732 selfA._toStore("SimulationQuantiles") or \
2733 selfA._toStore("KalmanGainAtOptimum"):
2734 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2735 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2736 if selfA._toStore("APosterioriCovariance") or \
2737 selfA._toStore("SimulationQuantiles"):
2741 _ee = numpy.matrix(numpy.zeros(nb)).T
2743 _HtEE = numpy.dot(HtM,_ee)
2744 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2745 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2746 HessienneI = numpy.matrix( HessienneI )
2747 A = HessienneI.I
2748 if min(A.shape) != max(A.shape):
2749 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2750 if (numpy.diag(A) < 0).any():
2751 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2752 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2754 L = numpy.linalg.cholesky( A )
2756 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2757 if selfA._toStore("APosterioriCovariance"):
2758 selfA.StoredVariables["APosterioriCovariance"].store( A )
2759 if selfA._toStore("JacobianMatrixAtOptimum"):
2760 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2761 if selfA._toStore("KalmanGainAtOptimum"):
2762 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2763 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2764 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2766 # Additional computations and/or storage
2767 # ---------------------------------------
2768 if selfA._toStore("Innovation") or \
2769 selfA._toStore("SigmaObs2") or \
2770 selfA._toStore("MahalanobisConsistency") or \
2771 selfA._toStore("OMB"):
2773 if selfA._toStore("Innovation"):
2774 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2775 if selfA._toStore("BMA"):
2776 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2777 if selfA._toStore("OMA"):
2778 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2779 if selfA._toStore("OMB"):
2780 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2781 if selfA._toStore("SigmaObs2"):
2782 TraceR = R.trace(Y.size)
2783 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2784 if selfA._toStore("MahalanobisConsistency"):
2785 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2786 if selfA._toStore("SimulationQuantiles"):
2787 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2788 HXa = numpy.matrix(numpy.ravel( HXa )).T
2791 for i in range(nech):
2792 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2793 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2794 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2796 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2797 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2798 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2799 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2802 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2804 YfQ = numpy.hstack((YfQ,Yr))
2805 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2806 YfQ.sort(axis=-1)
2807 YQ = None
2808 for quantile in selfA._parameters["Quantiles"]:
2809 if not (0. <= float(quantile) <= 1.): continue
2810 indice = int(nech * float(quantile) - 1./nech)
2811 if YQ is None: YQ = YfQ[:,indice]
2812 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2813 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2814 if selfA._toStore("SampledStateForQuantiles"):
2815 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2816 if selfA._toStore("SimulatedObservationAtBackground"):
2817 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2818 if selfA._toStore("SimulatedObservationAtOptimum"):
2819 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2823 # ==============================================================================
2824 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2833 Hm = HO["Direct"].appliedControledFormTo
2834 Mm = EM["Direct"].appliedControledFormTo
2836 if CM is not None and "Tangent" in CM and U is not None:
2837 Cm = CM["Tangent"].asMatrix(Xb)
2843 if hasattr(U,"store") and 1<=_step<len(U) :
2844 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2845 elif hasattr(U,"store") and len(U)==1:
2846 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2848 _Un = numpy.asmatrix(numpy.ravel( U )).T
2852 def CmUn(_xn,_un):
2853 if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
2854 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2855 _CmUn = _Cm * _un
2856 else:
2857 _CmUn = 0.
2858 return _CmUn
2860 # Remark: the observations are used starting from time step number 1,
2861 # and are used in Yo ordered according to these indices.
2862 # Hence step 0 is not used, since the first assimilation step starts
2863 # with the observation of step 1.
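# For example, with duration = 3 the forward loop below uses the pairs
# (forecast of step 0 -> 1, Y[1]) and (forecast of step 1 -> 2, Y[2]);
# Y[0] is never used.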
2865 # Number of steps identical to the number of observation steps
2866 if hasattr(Y,"stepnumber"):
2867 duration = Y.stepnumber()
2871 # Precomputation of the inverses of B and R
2872 BI = B.getI()
2873 RI = R.getI()
2875 # Starting point of the optimization
2876 Xini = selfA._parameters["InitializationPoint"]
2878 # Definition of the cost function
2879 # ------------------------------
2880 selfA.DirectCalculation = [None,] # Step 0 is not observed
2881 selfA.DirectInnovation = [None,] # Step 0 is not observed
2882 def CostFunction(x):
2883 _X = numpy.asmatrix(numpy.ravel( x )).T
2884 if selfA._parameters["StoreInternalVariables"] or \
2885 selfA._toStore("CurrentState") or \
2886 selfA._toStore("CurrentOptimum"):
2887 selfA.StoredVariables["CurrentState"].store( _X )
2888 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2889 selfA.DirectCalculation = [None,]
2890 selfA.DirectInnovation = [None,]
2893 for step in range(0,duration-1):
2894 if hasattr(Y,"store"):
2895 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2897 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2901 if selfA._parameters["EstimationOf"] == "State":
2902 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2903 elif selfA._parameters["EstimationOf"] == "Parameters":
2906 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2907 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2908 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2910 # Step computing the difference with the observations
2911 if selfA._parameters["EstimationOf"] == "State":
2912 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2913 elif selfA._parameters["EstimationOf"] == "Parameters":
2914 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2916 # Storage of the state
2917 selfA.DirectCalculation.append( _Xn )
2918 selfA.DirectInnovation.append( _YmHMX )
2920 # Addition to the observation term of the cost function
2921 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2922 J = Jb + Jo
2924 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2925 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2926 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2927 selfA.StoredVariables["CostFunctionJ" ].store( J )
2928 if selfA._toStore("IndexOfOptimum") or \
2929 selfA._toStore("CurrentOptimum") or \
2930 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2931 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2932 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2933 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2934 if selfA._toStore("IndexOfOptimum"):
2935 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2936 if selfA._toStore("CurrentOptimum"):
2937 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2938 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2939 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2940 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2941 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2942 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2943 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2944 return J
    def GradientOfCostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        GradJb = BI * (_X - Xb)
        GradJo = 0.
        for step in range(duration-1,0,-1):
            # Retrieve the last stored state of the evolution
            _Xn = selfA.DirectCalculation.pop()
            # Retrieve the last stored innovation
            _YmHMX = selfA.DirectInnovation.pop()
            # Adjoint operators at the current state
            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
            # Gradient by the adjoint state method
            GradJo = GradJo + Ha * RI * _YmHMX # Equivalent, for linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
            GradJo = Ma * GradJo               # Equivalent, for linear Ma, to: Ma( (_Xn, GradJo) )
        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
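    # Note on the "LBFGSB" branch below: for scipy versions between 0.19 and
    # 1.1.0, the companion lbfgsbhlt module is imported as the L-BFGS-B driver
    # in place of scipy.optimize; otherwise the standard
    # scipy.optimize.fmin_l_bfgs_b is used.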
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
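# The following variant performs the 3DVAR minimization on the control
# variable v defined by x = Xb + B v: the background term of the cost
# function is then evaluated as 0.5 * v.T * B.T * v, so that no inverse of B
# is needed during the optimization (only the inverse of R), and the optimal
# v is mapped back to the state space at the end.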
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precompute the transpose of B and the inverse of R
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
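    #
    # The optimizer works on the control variable v; the optimum is mapped back
    # to the state space as Xb + B v below, unless it is recovered from the
    # stored "CurrentState" values, which are already expressed in state space.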
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis error covariance
    # --------------------------------------------
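    # The a posteriori covariance A is computed as the inverse of the Hessian
    # of the cost function at the optimum, A = (B^{-1} + H^T R^{-1} H)^{-1},
    # built column by column by applying the tangent and adjoint observation
    # operators to the canonical basis vectors.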
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
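        # Two algebraically equivalent expressions of the Kalman gain are used,
        # choosing the one that inverts the smaller matrix: the "covariance"
        # form when there are no more observations than state variables, the
        # "information" form otherwise.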
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
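    # Consistency diagnostics: "SigmaObs2" estimates a global scaling of the
    # observation error variance from the innovation d and the residual Y-HXa,
    # and "MahalanobisConsistency" checks that 2*J(Xa) is close to the number
    # of assimilated observations, as expected for consistent error statistics.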
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
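        # Quantiles are estimated by Monte-Carlo sampling of the analysis error
        # (drawn from the a posteriori covariance A), propagated to observation
        # space either linearly through the tangent operator HtM or by a full
        # nonlinear evaluation of Hm, then read off the sorted ensemble of
        # simulated observations.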
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        EXr  = None
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
                if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
            else:
                YfQ = numpy.hstack((YfQ,Yr))
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
        if selfA._toStore("SampledStateForQuantiles"):
            selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')