1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
Defines the generic numerical objects.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
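# Illustrative sketch of how ExecuteFunction is fed through multiprocessing.Pool.map
# in TangentMatrix below. The path and module/function names are hypothetical
# placeholders, not values coming from the library:
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/dir",    # hypothetical directory
#       "__userFunction__modl" : "user_model",        # hypothetical module name
#       "__userFunction__name" : "MyModel",           # hypothetical callable name
#       }
#   HX = ExecuteFunction( (numpy.arange(3, dtype=float), None, funcrepr) )
#   # HX is numpy.ravel of the value returned by user_model.MyModel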
51 # ==============================================================================
52 class FDApproximation(object):
This class serves as an interface to define approximated operators. When an
object is created by providing a function "Function", one obtains an object
with 3 methods, "DirectOperator", "TangentOperator" and "AdjointOperator". The
finite-difference (FD) approximation is controlled either with the
multiplicative increment "increment", which is 1% by default, or with the fixed
increment "dX" which is then multiplied by "increment" (hence in %), and
centered finite differences are used if the boolean "centeredDF" is true.
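# Illustrative usage sketch (assuming a user-defined callable "MyModel" that takes
# a vector and returns a vector; the names and values below are hypothetical):
#
#   FDA = FDApproximation( Function = MyModel, increment = 0.01, centeredDF = False )
#   HX  = FDA.DirectOperator( X )           # value of the operator at X
#   J   = FDA.TangentMatrix( X )            # finite-difference Jacobian at X
#   HtX = FDA.TangentOperator( (X, dX) )    # linearized operator at X applied to dX
#   HaY = FDA.AdjointOperator( (X, Y) )     # transposed Jacobian at X applied to Y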
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
Computation of the direct operator using the provided function.

NB : the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _X = numpy.asmatrix(numpy.ravel( X )).T
180 _HX = numpy.ravel(self.__userFunction( _X ))
184 # ---------------------------------------------------------
185 def TangentMatrix(self, X ):
Computation of the tangent operator as the Jacobian by finite differences,
i.e. the gradient of H at X. Directional finite differences around the
point X are used. X is a numpy.matrix.

Centered finite differences (order-2 approximation):
1/ For each component i of X, the perturbation dX[i] is added to and
subtracted from the component X[i], to build X_plus_dXi and X_moins_dXi, and
the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi = H( X_moins_dXi )
are computed
2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
(2*dX[i])
3/ Each result, component by component, becomes a column of the Jacobian

Non-centered finite differences (order-1 approximation):
1/ For each component i of X, the perturbation dX[i] is added to the
component X[i] to build X_plus_dXi, and the response
HX_plus_dXi = H( X_plus_dXi ) is computed
2/ The central value HX = H(X) is computed
3/ The differences (HX_plus_dXi-HX) are formed and divided by dX[i]
4/ Each result, component by component, becomes a column of the Jacobian
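# Reminder of the two schemes implemented below, in illustrative notation where
# e_i is the i-th canonical basis vector and J[:,i] the i-th Jacobian column:
#   centered (order 2) : J[:,i] = ( H(X + dX[i] e_i) - H(X - dX[i] e_i) ) / (2 dX[i])
#   forward  (order 1) : J[:,i] = ( H(X + dX[i] e_i) - H(X) ) / dX[i]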
210 logging.debug("FDA Début du calcul de la Jacobienne")
211 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
212 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
214 if X is None or len(X)==0:
raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
217 _X = numpy.asmatrix(numpy.ravel( X )).T
219 if self.__dX is None:
220 _dX = self.__increment * _X
222 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
if (_dX == 0.).any():
    moyenne = _dX.mean()
    if moyenne == 0.:
        _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
    else:
        _dX = numpy.where( _dX == 0., moyenne, _dX )
231 __alreadyCalculated = False
233 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
234 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236 __alreadyCalculated, __i = True, __alreadyCalculatedP
237 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
239 if __alreadyCalculated:
240 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
241 _Jacobienne = self.__listJPCR[__i]
243 logging.debug("FDA Calcul Jacobienne (explicite)")
244 if self.__centeredDF:
246 if self.__mpEnabled and not self.__mfEnabled:
248 "__userFunction__path" : self.__userFunction__path,
249 "__userFunction__modl" : self.__userFunction__modl,
250 "__userFunction__name" : self.__userFunction__name,
253 for i in range( len(_dX) ):
255 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
256 _X_plus_dXi[i] = _X[i] + _dXi
257 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
258 _X_moins_dXi[i] = _X[i] - _dXi
260 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
261 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263 import multiprocessing
264 self.__pool = multiprocessing.Pool(self.__mpWorkers)
265 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
270 for i in range( len(_dX) ):
271 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273 elif self.__mfEnabled:
275 for i in range( len(_dX) ):
277 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
278 _X_plus_dXi[i] = _X[i] + _dXi
279 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
280 _X_moins_dXi[i] = _X[i] - _dXi
282 _xserie.append( _X_plus_dXi )
283 _xserie.append( _X_moins_dXi )
285 _HX_plusmoins_dX = self.DirectOperator( _xserie )
288 for i in range( len(_dX) ):
289 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
293 for i in range( _dX.size ):
295 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
296 _X_plus_dXi[i] = _X[i] + _dXi
297 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
298 _X_moins_dXi[i] = _X[i] - _dXi
300 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
301 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
303 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
307 if self.__mpEnabled and not self.__mfEnabled:
309 "__userFunction__path" : self.__userFunction__path,
310 "__userFunction__modl" : self.__userFunction__modl,
311 "__userFunction__name" : self.__userFunction__name,
314 _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315 for i in range( len(_dX) ):
316 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
317 _X_plus_dXi[i] = _X[i] + _dX[i]
319 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321 import multiprocessing
322 self.__pool = multiprocessing.Pool(self.__mpWorkers)
323 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
327 _HX = _HX_plus_dX.pop(0)
330 for i in range( len(_dX) ):
331 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333 elif self.__mfEnabled:
335 _xserie.append( _X.A1 )
336 for i in range( len(_dX) ):
337 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
338 _X_plus_dXi[i] = _X[i] + _dX[i]
340 _xserie.append( _X_plus_dXi )
342 _HX_plus_dX = self.DirectOperator( _xserie )
344 _HX = _HX_plus_dX.pop(0)
347 for i in range( len(_dX) ):
348 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
352 _HX = self.DirectOperator( _X )
353 for i in range( _dX.size ):
355 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
356 _X_plus_dXi[i] = _X[i] + _dXi
358 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
363 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
365 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366 while len(self.__listJPCP) > self.__lenghtRJ:
367 self.__listJPCP.pop(0)
368 self.__listJPCI.pop(0)
369 self.__listJPCR.pop(0)
370 self.__listJPPN.pop(0)
371 self.__listJPIN.pop(0)
372 self.__listJPCP.append( copy.copy(_X) )
373 self.__listJPCI.append( copy.copy(_dX) )
374 self.__listJPCR.append( copy.copy(_Jacobienne) )
375 self.__listJPPN.append( numpy.linalg.norm(_X) )
376 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
378 logging.debug("FDA Fin du calcul de la Jacobienne")
382 # ---------------------------------------------------------
383 def TangentOperator(self, paire, **extraArgs ):
Computation of the tangent operator using the Jacobian.

NB : the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
assert len(paire) == 1, "Incorrect length of arguments"
393 assert len(_paire) == 2, "Incorrect number of arguments"
395 assert len(paire) == 2, "Incorrect number of arguments"
398 _Jacobienne = self.TangentMatrix( X )
399 if dX is None or len(dX) == 0:
# Computation of the matrix form when the second argument is None
402 # -------------------------------------------------------------
403 if self.__mfEnabled: return [_Jacobienne,]
404 else: return _Jacobienne
# Computation of the linearized value of H at X applied to dX
408 # ------------------------------------------------------
409 _dX = numpy.asmatrix(numpy.ravel( dX )).T
410 _HtX = numpy.dot(_Jacobienne, _dX)
411 if self.__mfEnabled: return [_HtX.A1,]
414 # ---------------------------------------------------------
415 def AdjointOperator(self, paire, **extraArgs ):
Computation of the adjoint operator using the Jacobian.

NB : the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
assert len(paire) == 1, "Incorrect length of arguments"
425 assert len(_paire) == 2, "Incorrect number of arguments"
427 assert len(paire) == 2, "Incorrect number of arguments"
430 _JacobienneT = self.TangentMatrix( X ).T
431 if Y is None or len(Y) == 0:
# Computation of the matrix form when the second argument is None
434 # -------------------------------------------------------------
435 if self.__mfEnabled: return [_JacobienneT,]
436 else: return _JacobienneT
# Computation of the adjoint value at X applied to Y
440 # --------------------------------------------------
441 _Y = numpy.asmatrix(numpy.ravel( Y )).T
442 _HaY = numpy.dot(_JacobienneT, _Y)
443 if self.__mfEnabled: return [_HaY.A1,]
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
"Generation of an ensemble of size _nbmembers-1 of centered random states"
450 _bgcenter = numpy.ravel(_bgcenter)[:,None]
452 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
454 if _bgcovariance is None:
455 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460 return BackgroundEnsemble
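# Illustrative sketch (hypothetical sizes): an ensemble of 10 members drawn around
# a 3-dimensional background with identity covariance.
#   E = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.identity(3), 10 )   # shape (3, 10)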
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
"Generation of an ensemble of size _nbmembers-1 of centered random states"
465 def __CenteredRandomAnomalies(Zr, N):
Generates a matrix of N random anomalies centered on Zr, following the
handwritten notes of MB and consistent with PS's code, with eps = -1
471 Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
478 _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481 if _bgcovariance is None:
482 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
485 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486 _nbctl = _bgcenter.size
487 if _nbmembers > _nbctl:
488 _Z = numpy.concatenate((numpy.dot(
489 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494 BackgroundEnsemble = _bgcenter + _Zca
496 if max(abs(_bgcovariance.flatten())) > 0:
497 _nbctl = _bgcenter.size
498 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500 BackgroundEnsemble = _bgcenter + _Zca
502 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504 return BackgroundEnsemble
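# Illustrative sketch (hypothetical sizes): with the default _withSVD=True the
# anomalies are built from the SVD of the covariance, which is intended to keep
# the ensemble mean equal to _bgcenter.
#   E = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.identity(3), 5 )   # shape (3, 5)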
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
"Returns the centered anomalies from an ensemble of size StateSize*NbMembers"
510 __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
512 __Em = numpy.ravel(OptMean).reshape((-1,1))
514 return Normalisation * (numpy.asarray(Ensemble) - __Em)
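# Illustrative sketch: anomalies of a (3 states x 5 members) ensemble with the
# 1/sqrt(m-1) normalisation used elsewhere in this module.
#   E  = numpy.random.randn(3, 5)
#   Za = EnsembleOfAnomalies( E, Normalisation = 1./math.sqrt(5-1) )
#   # Za has zero mean along axis 1 and Za @ Za.T estimates the ensemble covariance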
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
"Returns the empirical estimate of the ensemble covariance"
# Fast covariance, but rarely positive definite
521 __Covariance = numpy.cov(Ensemble)
# Result often identical to numpy.cov, but more robust
524 __n, __m = numpy.asarray(Ensemble).shape
525 __Anomalies = EnsembleOfAnomalies( Ensemble )
# Empirical estimate
527 __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
529 __Covariance = (__Covariance + __Covariance.T) * 0.5
# Ensures positivity
531 __epsilon = mpr*numpy.trace(__Covariance)
532 __Covariance = __Covariance + __epsilon * numpy.identity(__n)
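# Illustrative sketch: empirical covariance of a synthetic ensemble.
#   E = numpy.random.randn(3, 50)
#   P = EnsembleErrorCovariance( E )   # (3, 3), symmetrized and made positive definite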
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
"Addition of a perturbation to each member of an ensemble according to a prescribed covariance"
539 if hasattr(__Covariance,"assparsematrix"):
540 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
# Handling of a zero or nearly zero covariance
543 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
# Handling of a zero or nearly zero covariance
547 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
# Handling of a zero or nearly zero covariance
550 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
# Handling of a zero or nearly zero covariance
554 __n, __m = __Ensemble.shape
555 if __Seed is not None: numpy.random.seed(__Seed)
557 if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
# Handling of a covariance that is a multiple of the identity
560 __std = numpy.sqrt(__Covariance.assparsematrix())
561 __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
563 elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
# Handling of a diagonal covariance with non-identical variances
565 __zero = numpy.zeros(__n)
566 __std = numpy.sqrt(__Covariance.assparsematrix())
567 __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
569 elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
# Handling of a full covariance
571 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
573 elif isinstance(__Covariance, numpy.ndarray):
# Handling of a full numpy covariance, knowing that this case is reached last
575 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
578 raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
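# Illustrative sketch (hypothetical sizes): perturbation of the 5 members of a
# (3 x 5) ensemble with a full covariance given as a plain numpy array.
#   E  = numpy.zeros((3, 5))
#   Q  = 0.1 * numpy.identity(3)
#   Ep = EnsemblePerturbationWithGivenCovariance( E, Q )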
582 # ==============================================================================
583 def CovarianceInflation(
585 InflationType = None,
586 InflationFactor = None,
587 BackgroundCov = None,
Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

Synthesis : Hunt 2007, section 2.3.5
594 if InflationFactor is None:
597 InflationFactor = float(InflationFactor)
599 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600 if InflationFactor < 1.:
raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
602 if InflationFactor < 1.+mpr:
604 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
606 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607 if InflationFactor < 1.:
raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
609 if InflationFactor < 1.+mpr:
611 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
615 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616 if InflationFactor < 0.:
raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
618 if InflationFactor < mpr:
620 __n, __m = numpy.asarray(InputCovOrEns).shape
raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
623 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
625 elif InflationType == "HybridOnBackgroundCovariance":
626 if InflationFactor < 0.:
raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
628 if InflationFactor < mpr:
630 __n, __m = numpy.asarray(InputCovOrEns).shape
raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
633 if BackgroundCov is None:
634 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635 if InputCovOrEns.shape != BackgroundCov.shape:
raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
637 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
639 elif InflationType == "Relaxation":
640 raise NotImplementedError("InflationType Relaxation")
643 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
645 return OutputCovOrEns
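# Illustrative sketch: multiplicative inflation of ensemble anomalies by a factor
# of 1.02 (the factor is an arbitrary example, not a library default).
#   E  = numpy.random.randn(3, 10)
#   Ei = CovarianceInflation( E, "MultiplicativeOnAnalysisAnomalies", 1.02 )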
647 # ==============================================================================
648 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
"A posteriori quantiles estimation (selfA is modified)"
650 nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
# Sampling of the states
655 if selfA._parameters["SimulationForQuantiles"] == "Linear":
656 HXa = numpy.matrix(numpy.ravel( HXa )).T
657 for i in range(nbsamples):
658 if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
659 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
660 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
662 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
663 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
664 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
665 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
668 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
670 YfQ = numpy.hstack((YfQ,Yr))
671 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
# Extraction of the quantiles
676 for quantile in selfA._parameters["Quantiles"]:
677 if not (0. <= float(quantile) <= 1.): continue
678 indice = int(nbsamples * float(quantile) - 1./nbsamples)
679 if YQ is None: YQ = YfQ[:,indice]
680 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
681 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
682 if selfA._toStore("SampledStateForQuantiles"):
683 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
687 # ==============================================================================
688 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
694 H = HO["Direct"].appliedControledFormTo
696 if selfA._parameters["EstimationOf"] == "State":
697 M = EM["Direct"].appliedControledFormTo
699 if CM is not None and "Tangent" in CM and U is not None:
700 Cm = CM["Tangent"].asMatrix(Xb)
# Precomputation of the inverses of B and R
# Observation duration and sizes
708 LagL = selfA._parameters["SmootherLagL"]
709 if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
raise ValueError("Fixed-lag smoother requires a series of observations")
711 if Y.stepnumber() < LagL:
raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
713 duration = Y.stepnumber()
714 __p = numpy.cumprod(Y.shape())[-1]
716 __m = selfA._parameters["NumberOfMembers"]
718 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
720 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
721 selfA.StoredVariables["Analysis"].store( Xb )
722 if selfA._toStore("APosterioriCovariance"):
723 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
# Initial direct computation (memorisation is preferred over recomputation)
727 __seed = numpy.random.get_state()
728 selfB = copy.deepcopy(selfA)
729 selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
730 if VariantM == "EnKS16-KalmanFilterFormula":
731 etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
733 raise ValueError("VariantM has to be chosen in the authorized methods list.")
735 EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
737 EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
738 selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
740 for step in range(LagL,duration-1):
742 sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
745 if hasattr(Y,"store"):
746 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
748 Ynpu = numpy.ravel( Y ).reshape((__p,1))
751 if hasattr(U,"store") and len(U)>1:
752 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
753 elif hasattr(U,"store") and len(U)==1:
754 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
756 Un = numpy.asmatrix(numpy.ravel( U )).T
760 #--------------------------
761 if VariantM == "EnKS16-KalmanFilterFormula":
762 if selfA._parameters["EstimationOf"] == "State": # Forecast
763 EL = M( [(EL[:,i], Un) for i in range(__m)],
765 returnSerieAsArrayMatrix = True )
766 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
767 EZ = H( [(EL[:,i], Un) for i in range(__m)],
769 returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Caution: if Cm also appears in M, it is counted twice!
771 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
773 elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > By principle, M = Id, Q = 0
775 EZ = H( [(EL[:,i], Un) for i in range(__m)],
777 returnSerieAsArrayMatrix = True )
779 vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
780 vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
782 mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
783 delta = RIdemi @ ( Ynpu - vZm )
784 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
785 vw = mT @ mS.T @ delta
787 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
788 mU = numpy.identity(__m)
789 wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
791 EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
for irl in range(LagL): # Smoothing of the L previous analyses
796 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
797 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
798 sEL[irl] = vEm + EX @ wTU
# Preservation of the order-0 retrospective analysis before rotation
801 Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
802 if selfA._toStore("APosterioriCovariance"):
805 for irl in range(LagL):
806 sEL[irl] = sEL[irl+1]
808 #--------------------------
810 raise ValueError("VariantM has to be chosen in the authorized methods list.")
812 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
814 selfA.StoredVariables["Analysis"].store( Xa )
815 if selfA._toStore("APosterioriCovariance"):
816 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
# Storage of the last analyses, which are incompletely updated
819 for irl in range(LagL):
820 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
821 Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
822 selfA.StoredVariables["Analysis"].store( Xa )
826 # ==============================================================================
827 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
829 Ensemble-Transform EnKF
831 if selfA._parameters["EstimationOf"] == "Parameters":
832 selfA._parameters["StoreInternalVariables"] = True
836 H = HO["Direct"].appliedControledFormTo
838 if selfA._parameters["EstimationOf"] == "State":
839 M = EM["Direct"].appliedControledFormTo
841 if CM is not None and "Tangent" in CM and U is not None:
842 Cm = CM["Tangent"].asMatrix(Xb)
# Number of steps identical to the number of observation steps
847 # -------------------------------------------------------
848 if hasattr(Y,"stepnumber"):
849 duration = Y.stepnumber()
850 __p = numpy.cumprod(Y.shape())[-1]
853 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
856 # ----------------------------------
857 if selfA._parameters["StoreInternalVariables"] \
858 or selfA._toStore("CostFunctionJ") \
859 or selfA._toStore("CostFunctionJb") \
860 or selfA._toStore("CostFunctionJo") \
861 or selfA._toStore("CurrentOptimum") \
862 or selfA._toStore("APosterioriCovariance"):
865 elif VariantM != "KalmanFilterFormula":
867 if VariantM == "KalmanFilterFormula":
873 __m = selfA._parameters["NumberOfMembers"]
874 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
876 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
877 #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
879 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
880 selfA.StoredVariables["Analysis"].store( Xb )
881 if selfA._toStore("APosterioriCovariance"):
882 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
885 previousJMinimum = numpy.finfo(float).max
887 for step in range(duration-1):
888 if hasattr(Y,"store"):
889 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
891 Ynpu = numpy.ravel( Y ).reshape((__p,1))
894 if hasattr(U,"store") and len(U)>1:
895 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
896 elif hasattr(U,"store") and len(U)==1:
897 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
899 Un = numpy.asmatrix(numpy.ravel( U )).T
903 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
904 Xn = CovarianceInflation( Xn,
905 selfA._parameters["InflationType"],
906 selfA._parameters["InflationFactor"],
909 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
910 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
912 returnSerieAsArrayMatrix = True )
913 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
914 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
916 returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Caution: if Cm also appears in M, it is counted twice!
918 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
919 Xn_predicted = Xn_predicted + Cm * Un
920 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > By principle, M = Id, Q = 0
923 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
925 returnSerieAsArrayMatrix = True )
927 # Mean of forecast and observation of forecast
928 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
929 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
932 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
933 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
935 #--------------------------
936 if VariantM == "KalmanFilterFormula":
937 mS = RIdemi * EaHX / math.sqrt(__m-1)
938 delta = RIdemi * ( Ynpu - Hfm )
939 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
940 vw = mT @ mS.T @ delta
942 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
943 mU = numpy.identity(__m)
945 EaX = EaX / math.sqrt(__m-1)
946 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
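# Reminder, in illustrative notation, of the ETKF analysis computed above:
#   S  = R^(-1/2) (HXf anomalies) / sqrt(m-1)
#   T  = ( I_m + S^T S )^(-1)
#   w  = T S^T R^(-1/2) ( y - mean(HXf) )
#   Xa = mean(Xf) + (Xf anomalies)/sqrt(m-1) ( w + sqrt(m-1) T^(1/2) U )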
947 #--------------------------
948 elif VariantM == "Variational":
HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
951 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
952 _Jo = 0.5 * _A.T @ (RI * _A)
953 _Jb = 0.5 * (__m-1) * w.T @ w
956 def GradientOfCostFunction(w):
957 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
958 _GardJo = - EaHX.T @ (RI * _A)
959 _GradJb = (__m-1) * w.reshape((__m,1))
960 _GradJ = _GardJo + _GradJb
961 return numpy.ravel(_GradJ)
962 vw = scipy.optimize.fmin_cg(
964 x0 = numpy.zeros(__m),
965 fprime = GradientOfCostFunction,
970 Hto = EaHX.T @ (RI * EaHX)
971 Htb = (__m-1) * numpy.identity(__m)
974 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
977 Xn = Xfm + EaX @ (vw[:,None] + EWa)
978 #--------------------------
elif VariantM == "FiniteSize11": # Boc2011 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
982 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
983 _Jo = 0.5 * _A.T @ (RI * _A)
984 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
987 def GradientOfCostFunction(w):
988 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
989 _GardJo = - EaHX.T @ (RI * _A)
990 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
991 _GradJ = _GardJo + _GradJb
992 return numpy.ravel(_GradJ)
993 vw = scipy.optimize.fmin_cg(
995 x0 = numpy.zeros(__m),
996 fprime = GradientOfCostFunction,
1001 Hto = EaHX.T @ (RI * EaHX)
1003 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1004 / (1 + 1/__m + vw.T @ vw)**2
1007 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1010 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1011 #--------------------------
elif VariantM == "FiniteSize15": # Boc2015 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1014 def CostFunction(w):
1015 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1016 _Jo = 0.5 * _A.T * RI * _A
1017 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1020 def GradientOfCostFunction(w):
1021 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1022 _GardJo = - EaHX.T @ (RI * _A)
1023 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1024 _GradJ = _GardJo + _GradJb
1025 return numpy.ravel(_GradJ)
1026 vw = scipy.optimize.fmin_cg(
1028 x0 = numpy.zeros(__m),
1029 fprime = GradientOfCostFunction,
1034 Hto = EaHX.T @ (RI * EaHX)
1036 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1037 / (1 + 1/__m + vw.T @ vw)**2
1040 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1043 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1044 #--------------------------
elif VariantM == "FiniteSize16": # Boc2016 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
1047 def CostFunction(w):
1048 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1049 _Jo = 0.5 * _A.T @ (RI * _A)
1050 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1053 def GradientOfCostFunction(w):
1054 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1055 _GardJo = - EaHX.T @ (RI * _A)
1056 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1057 _GradJ = _GardJo + _GradJb
1058 return numpy.ravel(_GradJ)
1059 vw = scipy.optimize.fmin_cg(
1061 x0 = numpy.zeros(__m),
1062 fprime = GradientOfCostFunction,
1067 Hto = EaHX.T @ (RI * EaHX)
1068 Htb = ((__m+1) / (__m-1)) * \
1069 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1070 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1073 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1076 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1077 #--------------------------
1079 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1081 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1082 Xn = CovarianceInflation( Xn,
1083 selfA._parameters["InflationType"],
1084 selfA._parameters["InflationFactor"],
1087 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1088 #--------------------------
1090 if selfA._parameters["StoreInternalVariables"] \
1091 or selfA._toStore("CostFunctionJ") \
1092 or selfA._toStore("CostFunctionJb") \
1093 or selfA._toStore("CostFunctionJo") \
1094 or selfA._toStore("APosterioriCovariance") \
1095 or selfA._toStore("InnovationAtCurrentAnalysis") \
1096 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1097 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1098 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1099 _Innovation = Ynpu - _HXa
1101 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with the analysis
1103 selfA.StoredVariables["Analysis"].store( Xa )
1104 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1105 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1106 if selfA._toStore("InnovationAtCurrentAnalysis"):
1107 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with the current state
1109 if selfA._parameters["StoreInternalVariables"] \
1110 or selfA._toStore("CurrentState"):
1111 selfA.StoredVariables["CurrentState"].store( Xn )
1112 if selfA._toStore("ForecastState"):
1113 selfA.StoredVariables["ForecastState"].store( EMX )
1114 if selfA._toStore("BMA"):
1115 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1116 if selfA._toStore("InnovationAtCurrentState"):
1117 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1118 if selfA._toStore("SimulatedObservationAtCurrentState") \
1119 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1120 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1122 if selfA._parameters["StoreInternalVariables"] \
1123 or selfA._toStore("CostFunctionJ") \
1124 or selfA._toStore("CostFunctionJb") \
1125 or selfA._toStore("CostFunctionJo") \
1126 or selfA._toStore("CurrentOptimum") \
1127 or selfA._toStore("APosterioriCovariance"):
1128 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1129 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1131 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1132 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1133 selfA.StoredVariables["CostFunctionJ" ].store( J )
1135 if selfA._toStore("IndexOfOptimum") \
1136 or selfA._toStore("CurrentOptimum") \
1137 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1138 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1139 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1140 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1141 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1142 if selfA._toStore("IndexOfOptimum"):
1143 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1144 if selfA._toStore("CurrentOptimum"):
1145 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1146 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1147 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1148 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1149 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1150 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1151 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1152 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1153 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1154 if selfA._toStore("APosterioriCovariance"):
1155 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1156 if selfA._parameters["EstimationOf"] == "Parameters" \
1157 and J < previousJMinimum:
1158 previousJMinimum = J
1160 if selfA._toStore("APosterioriCovariance"):
1161 covarianceXaMin = Pn
# ---> For the smoothers
1163 if selfA._toStore("CurrentEnsembleState"):
1164 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
# Final additional storage of the optimum in parameter estimation
1167 # ----------------------------------------------------------------------
1168 if selfA._parameters["EstimationOf"] == "Parameters":
1169 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1170 selfA.StoredVariables["Analysis"].store( XaMin )
1171 if selfA._toStore("APosterioriCovariance"):
1172 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1173 if selfA._toStore("BMA"):
1174 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1178 # ==============================================================================
1179 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1180 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1184 if selfA._parameters["EstimationOf"] == "Parameters":
1185 selfA._parameters["StoreInternalVariables"] = True
1189 H = HO["Direct"].appliedControledFormTo
1191 if selfA._parameters["EstimationOf"] == "State":
1192 M = EM["Direct"].appliedControledFormTo
1194 if CM is not None and "Tangent" in CM and U is not None:
1195 Cm = CM["Tangent"].asMatrix(Xb)
# Number of steps identical to the number of observation steps
1200 # -------------------------------------------------------
1201 if hasattr(Y,"stepnumber"):
1202 duration = Y.stepnumber()
1203 __p = numpy.cumprod(Y.shape())[-1]
1206 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
1209 # ----------------------------------
1210 if selfA._parameters["StoreInternalVariables"] \
1211 or selfA._toStore("CostFunctionJ") \
1212 or selfA._toStore("CostFunctionJb") \
1213 or selfA._toStore("CostFunctionJo") \
1214 or selfA._toStore("CurrentOptimum") \
1215 or selfA._toStore("APosterioriCovariance"):
1222 __m = selfA._parameters["NumberOfMembers"]
1223 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1225 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1227 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1229 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1231 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1232 selfA.StoredVariables["Analysis"].store( Xb )
1233 if selfA._toStore("APosterioriCovariance"):
1234 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1237 previousJMinimum = numpy.finfo(float).max
1239 for step in range(duration-1):
1240 if hasattr(Y,"store"):
1241 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1243 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1246 if hasattr(U,"store") and len(U)>1:
1247 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1248 elif hasattr(U,"store") and len(U)==1:
1249 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1251 Un = numpy.asmatrix(numpy.ravel( U )).T
1255 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1256 Xn = CovarianceInflation( Xn,
1257 selfA._parameters["InflationType"],
1258 selfA._parameters["InflationFactor"],
1261 #--------------------------
1262 if VariantM == "IEnKF12":
1263 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1264 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1268 Ta = numpy.identity(__m)
1269 vw = numpy.zeros(__m)
1270 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1271 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1274 E1 = vx1 + _epsilon * EaX
1276 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1278 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1279 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1281 returnSerieAsArrayMatrix = True )
1282 elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > By principle, M = Id
1285 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1286 vy1 = H((vx2, Un)).reshape((__p,1))
1288 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1290 returnSerieAsArrayMatrix = True )
1291 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1294 EaY = (HE2 - vy2) / _epsilon
1296 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1298 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1299 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1300 Deltaw = - numpy.linalg.solve(mH,GradJ)
1305 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1309 A2 = EnsembleOfAnomalies( E2 )
1312 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1313 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1316 #--------------------------
1318 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1320 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1321 Xn = CovarianceInflation( Xn,
1322 selfA._parameters["InflationType"],
1323 selfA._parameters["InflationFactor"],
1326 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1327 #--------------------------
1329 if selfA._parameters["StoreInternalVariables"] \
1330 or selfA._toStore("CostFunctionJ") \
1331 or selfA._toStore("CostFunctionJb") \
1332 or selfA._toStore("CostFunctionJo") \
1333 or selfA._toStore("APosterioriCovariance") \
1334 or selfA._toStore("InnovationAtCurrentAnalysis") \
1335 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1336 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1337 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1338 _Innovation = Ynpu - _HXa
1340 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with the analysis
1342 selfA.StoredVariables["Analysis"].store( Xa )
1343 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1344 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1345 if selfA._toStore("InnovationAtCurrentAnalysis"):
1346 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with the current state
1348 if selfA._parameters["StoreInternalVariables"] \
1349 or selfA._toStore("CurrentState"):
1350 selfA.StoredVariables["CurrentState"].store( Xn )
1351 if selfA._toStore("ForecastState"):
1352 selfA.StoredVariables["ForecastState"].store( E2 )
1353 if selfA._toStore("BMA"):
1354 selfA.StoredVariables["BMA"].store( E2 - Xa )
1355 if selfA._toStore("InnovationAtCurrentState"):
1356 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1357 if selfA._toStore("SimulatedObservationAtCurrentState") \
1358 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1359 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1361 if selfA._parameters["StoreInternalVariables"] \
1362 or selfA._toStore("CostFunctionJ") \
1363 or selfA._toStore("CostFunctionJb") \
1364 or selfA._toStore("CostFunctionJo") \
1365 or selfA._toStore("CurrentOptimum") \
1366 or selfA._toStore("APosterioriCovariance"):
1367 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1368 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1370 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1371 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1372 selfA.StoredVariables["CostFunctionJ" ].store( J )
1374 if selfA._toStore("IndexOfOptimum") \
1375 or selfA._toStore("CurrentOptimum") \
1376 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1377 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1378 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1379 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1380 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1381 if selfA._toStore("IndexOfOptimum"):
1382 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1383 if selfA._toStore("CurrentOptimum"):
1384 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1385 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1386 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1387 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1388 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1389 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1390 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1391 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1392 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1393 if selfA._toStore("APosterioriCovariance"):
1394 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1395 if selfA._parameters["EstimationOf"] == "Parameters" \
1396 and J < previousJMinimum:
1397 previousJMinimum = J
1399 if selfA._toStore("APosterioriCovariance"):
1400 covarianceXaMin = Pn
# Final additional storage of the optimum in parameter estimation
1403 # ----------------------------------------------------------------------
1404 if selfA._parameters["EstimationOf"] == "Parameters":
1405 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1406 selfA.StoredVariables["Analysis"].store( XaMin )
1407 if selfA._toStore("APosterioriCovariance"):
1408 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1409 if selfA._toStore("BMA"):
1410 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1414 # ==============================================================================
1415 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
# Non-linear operator for the outer loop
1424 Hm = HO["Direct"].appliedTo
# Precomputation of the inverses of B and R
# Starting point of the optimisation
1431 Xini = selfA._parameters["InitializationPoint"]
1433 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1434 Innovation = Y - HXb
1441 Xr = Xini.reshape((-1,1))
1442 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1446 Ht = HO["Tangent"].asMatrix(Xr)
1447 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
# Definition of the cost function
1450 # ------------------------------
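# Incremental cost function minimised in this inner loop, in illustrative notation
# with d = Y - H(Xb) the innovation and Ht the tangent of H at the outer iterate:
#   J(dx) = 1/2 dx^T B^(-1) dx + 1/2 (d - Ht dx)^T R^(-1) (d - Ht dx)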
1451 def CostFunction(dx):
1452 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1453 if selfA._parameters["StoreInternalVariables"] or \
1454 selfA._toStore("CurrentState") or \
1455 selfA._toStore("CurrentOptimum"):
1456 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1458 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1459 _dInnovation = Innovation - _HdX
1460 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1461 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1462 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1463 if selfA._toStore("InnovationAtCurrentState"):
1464 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1466 Jb = float( 0.5 * _dX.T * BI * _dX )
1467 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1470 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1471 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1472 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1473 selfA.StoredVariables["CostFunctionJ" ].store( J )
1474 if selfA._toStore("IndexOfOptimum") or \
1475 selfA._toStore("CurrentOptimum") or \
1476 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1477 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1478 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1479 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1480 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1481 if selfA._toStore("IndexOfOptimum"):
1482 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1483 if selfA._toStore("CurrentOptimum"):
1484 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1485 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1486 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1487 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1488 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1489 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1490 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1491 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1492 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1495 def GradientOfCostFunction(dx):
1496 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1498 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1499 _dInnovation = Innovation - _HdX
1501 GradJo = - Ht.T @ (RI * _dInnovation)
1502 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
# Minimisation of the functional
1506 # --------------------------------
1507 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1509 if selfA._parameters["Minimizer"] == "LBFGSB":
1510 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1511 if "0.19" <= scipy.version.version <= "1.1.0":
1512 import lbfgsbhlt as optimiseur
1514 import scipy.optimize as optimiseur
1515 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1516 func = CostFunction,
1517 x0 = numpy.zeros(Xini.size),
1518 fprime = GradientOfCostFunction,
1520 bounds = selfA._parameters["Bounds"],
1521 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1522 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1523 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1524 iprint = selfA._parameters["optiprint"],
1526 nfeval = Informations['funcalls']
1527 rc = Informations['warnflag']
1528 elif selfA._parameters["Minimizer"] == "TNC":
1529 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1530 func = CostFunction,
1531 x0 = numpy.zeros(Xini.size),
1532 fprime = GradientOfCostFunction,
1534 bounds = selfA._parameters["Bounds"],
1535 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1536 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1537 ftol = selfA._parameters["CostDecrementTolerance"],
1538 messages = selfA._parameters["optmessages"],
1540 elif selfA._parameters["Minimizer"] == "CG":
1541 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1543 x0 = numpy.zeros(Xini.size),
1544 fprime = GradientOfCostFunction,
1546 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1547 gtol = selfA._parameters["GradientNormTolerance"],
1548 disp = selfA._parameters["optdisp"],
1551 elif selfA._parameters["Minimizer"] == "NCG":
1552 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1554 x0 = numpy.zeros(Xini.size),
1555 fprime = GradientOfCostFunction,
1557 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1558 avextol = selfA._parameters["CostDecrementTolerance"],
1559 disp = selfA._parameters["optdisp"],
1562 elif selfA._parameters["Minimizer"] == "BFGS":
1563 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1565 x0 = numpy.zeros(Xini.size),
1566 fprime = GradientOfCostFunction,
1568 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1569 gtol = selfA._parameters["GradientNormTolerance"],
1570 disp = selfA._parameters["optdisp"],
1573 else:
1574 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1576 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1577 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1579 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1580 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1581 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1583 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1586 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1587 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1589 # Obtention de l'analyse
1590 # ----------------------
1593 selfA.StoredVariables["Analysis"].store( Xa )
1595 if selfA._toStore("OMA") or \
1596 selfA._toStore("SigmaObs2") or \
1597 selfA._toStore("SimulationQuantiles") or \
1598 selfA._toStore("SimulatedObservationAtOptimum"):
1599 if selfA._toStore("SimulatedObservationAtCurrentState"):
1600 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1601 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1602 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1606 # Calcul de la covariance d'analyse
1607 # ---------------------------------
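# For reference, the block below assembles the inverse a posteriori covariance column by
# column, applying (B^-1 + H^T R^-1 H) to each unit vector _ee, then inverts it: this is
# the classical linearized result A = (B^-1 + H^T R^-1 H)^-1.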
1608 if selfA._toStore("APosterioriCovariance") or \
1609 selfA._toStore("SimulationQuantiles") or \
1610 selfA._toStore("JacobianMatrixAtOptimum") or \
1611 selfA._toStore("KalmanGainAtOptimum"):
1612 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1613 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1614 if selfA._toStore("APosterioriCovariance") or \
1615 selfA._toStore("SimulationQuantiles") or \
1616 selfA._toStore("KalmanGainAtOptimum"):
1617 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1618 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1619 if selfA._toStore("APosterioriCovariance") or \
1620 selfA._toStore("SimulationQuantiles"):
1621 HessienneI = []
1622 nb = Xa.size
1623 for i in range(nb):
1624 _ee = numpy.matrix(numpy.zeros(nb)).T
1625 _ee[i] = 1.
1626 _HtEE = numpy.dot(HtM,_ee)
1627 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1628 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1629 HessienneI = numpy.matrix( HessienneI )
1630 A = HessienneI.I
1631 if min(A.shape) != max(A.shape):
1632 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, but it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1633 if (numpy.diag(A) < 0).any():
1634 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1635 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1636 try:
1637 L = numpy.linalg.cholesky( A )
1638 except:
1639 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1640 if selfA._toStore("APosterioriCovariance"):
1641 selfA.StoredVariables["APosterioriCovariance"].store( A )
1642 if selfA._toStore("JacobianMatrixAtOptimum"):
1643 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
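# The two expressions below are algebraically equivalent forms of the Kalman gain,
# KG = B H^T (H B H^T + R)^-1 = (B^-1 + H^T R^-1 H)^-1 H^T R^-1 ;
# the cheaper one is selected according to the relative sizes of Y and Xb.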
1644 if selfA._toStore("KalmanGainAtOptimum"):
1645 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1646 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1647 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1649 # Calculs et/ou stockages supplémentaires
1650 # ---------------------------------------
1651 if selfA._toStore("Innovation") or \
1652 selfA._toStore("SigmaObs2") or \
1653 selfA._toStore("MahalanobisConsistency") or \
1654 selfA._toStore("OMB"):
1656 if selfA._toStore("Innovation"):
1657 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1658 if selfA._toStore("BMA"):
1659 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1660 if selfA._toStore("OMA"):
1661 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1662 if selfA._toStore("OMB"):
1663 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
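# SigmaObs2 below is the scalar consistency diagnostic (Y-HXb)^T (Y-HXa) / Tr(R); it is
# expected to be close to 1 when the observation error covariance R is correctly scaled.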
1664 if selfA._toStore("SigmaObs2"):
1665 TraceR = R.trace(Y.size)
1666 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1667 if selfA._toStore("MahalanobisConsistency"):
1668 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1669 if selfA._toStore("SimulationQuantiles"):
1670 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1671 if selfA._toStore("SimulatedObservationAtBackground"):
1672 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1673 if selfA._toStore("SimulatedObservationAtOptimum"):
1674 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1678 # ==============================================================================
1679 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1680 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1682 Maximum Likelihood Ensemble Filter
1684 if selfA._parameters["EstimationOf"] == "Parameters":
1685 selfA._parameters["StoreInternalVariables"] = True
1689 H = HO["Direct"].appliedControledFormTo
1691 if selfA._parameters["EstimationOf"] == "State":
1692 M = EM["Direct"].appliedControledFormTo
1694 if CM is not None and "Tangent" in CM and U is not None:
1695 Cm = CM["Tangent"].asMatrix(Xb)
1699 # Nombre de pas identique au nombre de pas d'observations
1700 # -------------------------------------------------------
1701 if hasattr(Y,"stepnumber"):
1702 duration = Y.stepnumber()
1703 __p = numpy.cumprod(Y.shape())[-1]
1704 else:
1705 duration = 2
1706 __p = numpy.array(Y).size
1708 # Précalcul des inversions de B et R
1709 # ----------------------------------
1710 if selfA._parameters["StoreInternalVariables"] \
1711 or selfA._toStore("CostFunctionJ") \
1712 or selfA._toStore("CostFunctionJb") \
1713 or selfA._toStore("CostFunctionJo") \
1714 or selfA._toStore("CurrentOptimum") \
1715 or selfA._toStore("APosterioriCovariance"):
1716 BI = B.getI()
1717 RI = R.getI()
1721 __n = Xb.size
1722 __m = selfA._parameters["NumberOfMembers"]
1723 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1724 else: Pn = B
1725 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1726 else: Rn = R
1727 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1729 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1730 selfA.StoredVariables["Analysis"].store( Xb )
1731 if selfA._toStore("APosterioriCovariance"):
1732 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1735 previousJMinimum = numpy.finfo(float).max
1737 for step in range(duration-1):
1738 if hasattr(Y,"store"):
1739 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1741 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1743 if U is not None:
1744 if hasattr(U,"store") and len(U)>1:
1745 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1746 elif hasattr(U,"store") and len(U)==1:
1747 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1748 else:
1749 Un = numpy.asmatrix(numpy.ravel( U )).T
1750 else:
1751 Un = None
1753 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1754 Xn = CovarianceInflation( Xn,
1755 selfA._parameters["InflationType"],
1756 selfA._parameters["InflationFactor"],
1759 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1760 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1762 returnSerieAsArrayMatrix = True )
1763 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1764 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1765 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1766 Xn_predicted = Xn_predicted + Cm * Un
1767 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1768 # --- > By principle, M = Id, Q = 0
1769 Xn_predicted = Xn
1771 #--------------------------
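# In outline, the MLEF13 analysis below iterates in the space of ensemble weights w:
# the state is x = Xfm + EaX w with EaX the (scaled) forecast anomalies, the cost is
# J(w) = 1/2 w^T w + 1/2 (y - H(x))^T R^-1 (y - H(x)), each Newton step solves
# (I + EaY^T R^-1 EaY) Deltaw = -GradJ with EaY the observation-space anomalies, and the
# transform Ta = (I + EaY^T R^-1 EaY)^-1/2 is used to rebuild the analysis anomalies.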
1772 if VariantM == "MLEF13":
1773 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1774 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1775 Ua = numpy.identity(__m)
1776 __j = 0
1777 Deltaw = 1
1778 if not BnotT:
1779 Ta = numpy.identity(__m)
1780 vw = numpy.zeros(__m)
1781 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1782 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1784 if BnotT:
1785 E1 = vx1 + _epsilon * EaX
1786 else:
1787 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1789 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1791 returnSerieAsArrayMatrix = True )
1792 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1794 if BnotT:
1795 EaY = (HE2 - vy2) / _epsilon
1796 else:
1797 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1799 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1800 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1801 Deltaw = - numpy.linalg.solve(mH,GradJ)
1803 vw = vw + Deltaw
1805 if not BnotT:
1806 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1808 __j = __j + 1
1810 if BnotT:
1811 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1813 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1814 #--------------------------
1815 else:
1816 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1818 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1819 Xn = CovarianceInflation( Xn,
1820 selfA._parameters["InflationType"],
1821 selfA._parameters["InflationFactor"],
1824 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1825 #--------------------------
1827 if selfA._parameters["StoreInternalVariables"] \
1828 or selfA._toStore("CostFunctionJ") \
1829 or selfA._toStore("CostFunctionJb") \
1830 or selfA._toStore("CostFunctionJo") \
1831 or selfA._toStore("APosterioriCovariance") \
1832 or selfA._toStore("InnovationAtCurrentAnalysis") \
1833 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1834 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1835 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1836 _Innovation = Ynpu - _HXa
1838 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1839 # ---> avec analysis
1840 selfA.StoredVariables["Analysis"].store( Xa )
1841 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1842 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1843 if selfA._toStore("InnovationAtCurrentAnalysis"):
1844 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1845 # ---> avec current state
1846 if selfA._parameters["StoreInternalVariables"] \
1847 or selfA._toStore("CurrentState"):
1848 selfA.StoredVariables["CurrentState"].store( Xn )
1849 if selfA._toStore("ForecastState"):
1850 selfA.StoredVariables["ForecastState"].store( EMX )
1851 if selfA._toStore("BMA"):
1852 selfA.StoredVariables["BMA"].store( EMX - Xa )
1853 if selfA._toStore("InnovationAtCurrentState"):
1854 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1855 if selfA._toStore("SimulatedObservationAtCurrentState") \
1856 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1857 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1859 if selfA._parameters["StoreInternalVariables"] \
1860 or selfA._toStore("CostFunctionJ") \
1861 or selfA._toStore("CostFunctionJb") \
1862 or selfA._toStore("CostFunctionJo") \
1863 or selfA._toStore("CurrentOptimum") \
1864 or selfA._toStore("APosterioriCovariance"):
1865 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1866 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1867 J = Jb + Jo
1868 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1869 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1870 selfA.StoredVariables["CostFunctionJ" ].store( J )
1872 if selfA._toStore("IndexOfOptimum") \
1873 or selfA._toStore("CurrentOptimum") \
1874 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1875 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1876 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1877 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1878 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1879 if selfA._toStore("IndexOfOptimum"):
1880 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1881 if selfA._toStore("CurrentOptimum"):
1882 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1883 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1884 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1885 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1886 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1887 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1888 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1889 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1890 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1891 if selfA._toStore("APosterioriCovariance"):
1892 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1893 if selfA._parameters["EstimationOf"] == "Parameters" \
1894 and J < previousJMinimum:
1895 previousJMinimum = J
1896 XaMin = Xa
1897 if selfA._toStore("APosterioriCovariance"):
1898 covarianceXaMin = Pn
1900 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1901 # ----------------------------------------------------------------------
1902 if selfA._parameters["EstimationOf"] == "Parameters":
1903 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1904 selfA.StoredVariables["Analysis"].store( XaMin )
1905 if selfA._toStore("APosterioriCovariance"):
1906 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1907 if selfA._toStore("BMA"):
1908 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1912 # ==============================================================================
1924 Computer implementation of the MMQR algorithm, based on the publication:
1925 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1926 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1929 # Recuperation des donnees et informations initiales
1930 # --------------------------------------------------
1931 variables = numpy.ravel( x0 )
1932 mesures = numpy.ravel( y )
1933 increment = sys.float_info[0]
1936 quantile = float(quantile)
1938 # Calcul des parametres du MM
1939 # ---------------------------
1940 tn = float(toler) / n
1941 e0 = -tn / math.log(tn)
1942 epsilon = (e0-tn)/(1+math.log(e0))
1944 # Calculs d'initialisation
1945 # ------------------------
1946 residus = mesures - numpy.ravel( func( variables ) )
1947 poids = 1./(epsilon+numpy.abs(residus))
1948 veps = 1. - 2. * quantile - residus * poids
1949 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
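# In outline, each MM iteration below majorizes the quantile regression objective by a
# weighted least squares surrogate: weights 1/(epsilon+|r_i|) are computed from the current
# residuals r, a Gauss-Newton type step solves (J^T P J) s = -(J^T veps) with J the model
# Jacobian, and the step is halved until the surrogate value decreases.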
1952 # Recherche iterative
1953 # -------------------
1954 while (increment > toler) and (iteration < maxfun) :
1957 Derivees = numpy.array(fprime(variables))
1958 Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
1959 DeriveesT = Derivees.transpose()
1960 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1961 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
1962 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1964 variables = variables + step
1965 if bounds is not None:
1966 # Warning: avoid an infinite loop if an interval is too small
1967 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1968 step = step/2.
1969 variables = variables - step
1970 residus = mesures - numpy.ravel( func(variables) )
1971 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1973 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1974 step = step/2.
1975 variables = variables - step
1976 residus = mesures - numpy.ravel( func(variables) )
1977 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1979 increment = lastsurrogate-surrogate
1980 poids = 1./(epsilon+numpy.abs(residus))
1981 veps = 1. - 2. * quantile - residus * poids
1982 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1986 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1988 return variables, Ecart, [n,p,iteration,increment,0]
1990 # ==============================================================================
1991 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1993 Multi-step and multi-method 3DVAR
1998 Xn = numpy.ravel(Xb).reshape((-1,1))
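# In outline: at each observation time, the previous analysis is propagated by the evolution
# model M to provide the background of the next cycle, and the selected mono-step 3DVAR
# variant (the oneCycle argument) is then run on that background and the new observation.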
2000 if selfA._parameters["EstimationOf"] == "State":
2001 M = EM["Direct"].appliedTo
2003 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2004 selfA.StoredVariables["Analysis"].store( Xn )
2005 if selfA._toStore("APosterioriCovariance"):
2006 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2007 else: Pn = B
2008 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2009 if selfA._toStore("ForecastState"):
2010 selfA.StoredVariables["ForecastState"].store( Xn )
2012 if hasattr(Y,"stepnumber"):
2013 duration = Y.stepnumber()
2019 for step in range(duration-1):
2020 if hasattr(Y,"store"):
2021 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2023 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2025 if selfA._parameters["EstimationOf"] == "State": # Forecast
2026 Xn = selfA.StoredVariables["Analysis"][-1]
2027 Xn_predicted = M( Xn )
2028 if selfA._toStore("ForecastState"):
2029 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2030 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2031 # --- > Par principe, M = Id, Q = 0
2032 Xn_predicted = Xn
2033 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2035 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
2039 # ==============================================================================
2040 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2049 Hm = HO["Direct"].appliedTo
2051 # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2052 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2053 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2054 else:
2055 HXb = Hm( Xb )
2056 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2057 if Y.size != HXb.size:
2058 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2059 if max(Y.shape) != max(HXb.shape):
2060 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2062 if selfA._toStore("JacobianMatrixAtBackground"):
2063 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2064 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2065 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2067 Ht = HO["Tangent"].asMatrix(Xb)
2068 BHT = B * Ht.T
2069 HBHTpR = R + Ht * BHT
2070 Innovation = Y - HXb
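# For reference, PSAS works in the dual (observation) space: the minimisation below is over
# w, with J(w) = 1/2 w^T (H B H^T + R) w - w^T d where d = Y - H(Xb) is the innovation, and
# the physical analysis is recovered afterwards as Xa = Xb + B H^T w.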
2072 # Point de démarrage de l'optimisation
2073 Xini = numpy.zeros(Xb.shape)
2075 # Définition de la fonction-coût
2076 # ------------------------------
2077 def CostFunction(w):
2078 _W = numpy.asmatrix(numpy.ravel( w )).T
2079 if selfA._parameters["StoreInternalVariables"] or \
2080 selfA._toStore("CurrentState") or \
2081 selfA._toStore("CurrentOptimum"):
2082 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2083 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2084 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2085 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2086 if selfA._toStore("InnovationAtCurrentState"):
2087 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2089 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2090 Jo = float( - _W.T * Innovation )
2091 J = Jb + Jo
2093 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2094 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2095 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2096 selfA.StoredVariables["CostFunctionJ" ].store( J )
2097 if selfA._toStore("IndexOfOptimum") or \
2098 selfA._toStore("CurrentOptimum") or \
2099 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2100 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2101 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2102 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2103 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2104 if selfA._toStore("IndexOfOptimum"):
2105 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2106 if selfA._toStore("CurrentOptimum"):
2107 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2108 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2109 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2110 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2111 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2112 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2113 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2114 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2115 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2116 return J
2118 def GradientOfCostFunction(w):
2119 _W = numpy.asmatrix(numpy.ravel( w )).T
2120 GradJb = HBHTpR * _W
2121 GradJo = - Innovation
2122 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2123 return GradJ
2125 # Minimisation de la fonctionnelle
2126 # --------------------------------
2127 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2129 if selfA._parameters["Minimizer"] == "LBFGSB":
2130 if "0.19" <= scipy.version.version <= "1.1.0":
2131 import lbfgsbhlt as optimiseur
2132 else:
2133 import scipy.optimize as optimiseur
2134 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2135 func = CostFunction,
2137 fprime = GradientOfCostFunction,
2139 bounds = selfA._parameters["Bounds"],
2140 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2141 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2142 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2143 iprint = selfA._parameters["optiprint"],
2145 nfeval = Informations['funcalls']
2146 rc = Informations['warnflag']
2147 elif selfA._parameters["Minimizer"] == "TNC":
2148 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2149 func = CostFunction,
2151 fprime = GradientOfCostFunction,
2153 bounds = selfA._parameters["Bounds"],
2154 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2155 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2156 ftol = selfA._parameters["CostDecrementTolerance"],
2157 messages = selfA._parameters["optmessages"],
2159 elif selfA._parameters["Minimizer"] == "CG":
2160 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2163 fprime = GradientOfCostFunction,
2165 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2166 gtol = selfA._parameters["GradientNormTolerance"],
2167 disp = selfA._parameters["optdisp"],
2170 elif selfA._parameters["Minimizer"] == "NCG":
2171 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2174 fprime = GradientOfCostFunction,
2176 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2177 avextol = selfA._parameters["CostDecrementTolerance"],
2178 disp = selfA._parameters["optdisp"],
2181 elif selfA._parameters["Minimizer"] == "BFGS":
2182 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2185 fprime = GradientOfCostFunction,
2187 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2188 gtol = selfA._parameters["GradientNormTolerance"],
2189 disp = selfA._parameters["optdisp"],
2192 else:
2193 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2195 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2196 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2198 # Correction pour pallier a un bug de TNC sur le retour du Minimum
2199 # ----------------------------------------------------------------
2200 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2201 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2202 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2204 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2206 # Obtention de l'analyse
2207 # ----------------------
2210 selfA.StoredVariables["Analysis"].store( Xa )
2212 if selfA._toStore("OMA") or \
2213 selfA._toStore("SigmaObs2") or \
2214 selfA._toStore("SimulationQuantiles") or \
2215 selfA._toStore("SimulatedObservationAtOptimum"):
2216 if selfA._toStore("SimulatedObservationAtCurrentState"):
2217 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2218 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2219 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2223 # Calcul de la covariance d'analyse
2224 # ---------------------------------
2225 if selfA._toStore("APosterioriCovariance") or \
2226 selfA._toStore("SimulationQuantiles") or \
2227 selfA._toStore("JacobianMatrixAtOptimum") or \
2228 selfA._toStore("KalmanGainAtOptimum"):
2229 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2230 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2231 if selfA._toStore("APosterioriCovariance") or \
2232 selfA._toStore("SimulationQuantiles") or \
2233 selfA._toStore("KalmanGainAtOptimum"):
2234 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2235 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2236 if selfA._toStore("APosterioriCovariance") or \
2237 selfA._toStore("SimulationQuantiles"):
2238 BI = B.getI()
2239 RI = R.getI()
2240 HessienneI = []
2241 nb = Xa.size
2242 for i in range(nb):
2243 _ee = numpy.matrix(numpy.zeros(nb)).T
2244 _ee[i] = 1.
2245 _HtEE = numpy.dot(HtM,_ee)
2246 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2247 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2248 HessienneI = numpy.matrix( HessienneI )
2249 A = HessienneI.I
2250 if min(A.shape) != max(A.shape):
2251 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, but it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2252 if (numpy.diag(A) < 0).any():
2253 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2254 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2255 try:
2256 L = numpy.linalg.cholesky( A )
2257 except:
2258 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2259 if selfA._toStore("APosterioriCovariance"):
2260 selfA.StoredVariables["APosterioriCovariance"].store( A )
2261 if selfA._toStore("JacobianMatrixAtOptimum"):
2262 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2263 if selfA._toStore("KalmanGainAtOptimum"):
2264 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2265 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2266 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2268 # Calculs et/ou stockages supplémentaires
2269 # ---------------------------------------
2270 if selfA._toStore("Innovation") or \
2271 selfA._toStore("SigmaObs2") or \
2272 selfA._toStore("MahalanobisConsistency") or \
2273 selfA._toStore("OMB"):
2275 if selfA._toStore("Innovation"):
2276 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2277 if selfA._toStore("BMA"):
2278 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2279 if selfA._toStore("OMA"):
2280 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2281 if selfA._toStore("OMB"):
2282 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2283 if selfA._toStore("SigmaObs2"):
2284 TraceR = R.trace(Y.size)
2285 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2286 if selfA._toStore("MahalanobisConsistency"):
2287 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2288 if selfA._toStore("SimulationQuantiles"):
2289 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2290 if selfA._toStore("SimulatedObservationAtBackground"):
2291 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2292 if selfA._toStore("SimulatedObservationAtOptimum"):
2293 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2297 # ==============================================================================
2298 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2302 if selfA._parameters["EstimationOf"] == "Parameters":
2303 selfA._parameters["StoreInternalVariables"] = True
2306 H = HO["Direct"].appliedControledFormTo
2308 if selfA._parameters["EstimationOf"] == "State":
2309 M = EM["Direct"].appliedControledFormTo
2311 if CM is not None and "Tangent" in CM and U is not None:
2312 Cm = CM["Tangent"].asMatrix(Xb)
2316 # Durée d'observation et tailles
2317 if hasattr(Y,"stepnumber"):
2318 duration = Y.stepnumber()
2319 __p = numpy.cumprod(Y.shape())[-1]
2320 else:
2321 duration = 2
2322 __p = numpy.array(Y).size
2324 # Précalcul des inversions de B et R
2325 if selfA._parameters["StoreInternalVariables"] \
2326 or selfA._toStore("CostFunctionJ") \
2327 or selfA._toStore("CostFunctionJb") \
2328 or selfA._toStore("CostFunctionJo") \
2329 or selfA._toStore("CurrentOptimum") \
2330 or selfA._toStore("APosterioriCovariance"):
2331 BI = B.getI()
2332 RI = R.getI()
2334 __n = Xb.size
2335 __m = selfA._parameters["NumberOfMembers"]
2337 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2338 else: Pn = B
2339 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2340 else: Rn = R
2341 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2343 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2344 selfA.StoredVariables["Analysis"].store( Xb )
2345 if selfA._toStore("APosterioriCovariance"):
2346 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2349 previousJMinimum = numpy.finfo(float).max
2351 for step in range(duration-1):
2352 if hasattr(Y,"store"):
2353 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2355 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2357 if U is not None:
2358 if hasattr(U,"store") and len(U)>1:
2359 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2360 elif hasattr(U,"store") and len(U)==1:
2361 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2362 else:
2363 Un = numpy.asmatrix(numpy.ravel( U )).T
2364 else:
2365 Un = None
2367 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2368 Xn = CovarianceInflation( Xn,
2369 selfA._parameters["InflationType"],
2370 selfA._parameters["InflationFactor"],
2373 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2374 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2376 returnSerieAsArrayMatrix = True )
2377 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2378 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2380 returnSerieAsArrayMatrix = True )
2381 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2382 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2383 Xn_predicted = Xn_predicted + Cm * Un
2384 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2385 # --- > By principle, M = Id, Q = 0
2386 Xn_predicted = Xn
2387 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2389 returnSerieAsArrayMatrix = True )
2391 # Mean of forecast and observation of forecast
2392 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2393 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
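# In outline, both variants below implement the stochastic ("perturbed observations") EnKF
# update: a gain equivalent to K = Pf H^T (H Pf H^T + R)^-1 is estimated from the ensemble
# anomalies, and each member is updated with its own perturbed observation drawn from N(0,Rn),
# which keeps the spread of the analysis ensemble statistically consistent.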
2395 #--------------------------
2396 if VariantM == "KalmanFilterFormula05":
2397 PfHT, HPfHT = 0., 0.
2398 for i in range(__m):
2399 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2400 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2401 PfHT += Exfi * Eyfi.T
2402 HPfHT += Eyfi * Eyfi.T
2403 PfHT = (1./(__m-1)) * PfHT
2404 HPfHT = (1./(__m-1)) * HPfHT
2405 Kn = PfHT * ( R + HPfHT ).I
2408 for i in range(__m):
2409 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2410 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2411 #--------------------------
2412 elif VariantM == "KalmanFilterFormula16":
2413 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2414 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2416 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2417 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2419 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2421 for i in range(__m):
2422 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2423 #--------------------------
2424 else:
2425 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2427 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2428 Xn = CovarianceInflation( Xn,
2429 selfA._parameters["InflationType"],
2430 selfA._parameters["InflationFactor"],
2433 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2434 #--------------------------
2436 if selfA._parameters["StoreInternalVariables"] \
2437 or selfA._toStore("CostFunctionJ") \
2438 or selfA._toStore("CostFunctionJb") \
2439 or selfA._toStore("CostFunctionJo") \
2440 or selfA._toStore("APosterioriCovariance") \
2441 or selfA._toStore("InnovationAtCurrentAnalysis") \
2442 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2443 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2444 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2445 _Innovation = Ynpu - _HXa
2447 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2448 # ---> avec analysis
2449 selfA.StoredVariables["Analysis"].store( Xa )
2450 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2451 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2452 if selfA._toStore("InnovationAtCurrentAnalysis"):
2453 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2454 # ---> avec current state
2455 if selfA._parameters["StoreInternalVariables"] \
2456 or selfA._toStore("CurrentState"):
2457 selfA.StoredVariables["CurrentState"].store( Xn )
2458 if selfA._toStore("ForecastState"):
2459 selfA.StoredVariables["ForecastState"].store( EMX )
2460 if selfA._toStore("BMA"):
2461 selfA.StoredVariables["BMA"].store( EMX - Xa )
2462 if selfA._toStore("InnovationAtCurrentState"):
2463 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2464 if selfA._toStore("SimulatedObservationAtCurrentState") \
2465 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2466 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2468 if selfA._parameters["StoreInternalVariables"] \
2469 or selfA._toStore("CostFunctionJ") \
2470 or selfA._toStore("CostFunctionJb") \
2471 or selfA._toStore("CostFunctionJo") \
2472 or selfA._toStore("CurrentOptimum") \
2473 or selfA._toStore("APosterioriCovariance"):
2474 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2475 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2476 J = Jb + Jo
2477 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2478 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2479 selfA.StoredVariables["CostFunctionJ" ].store( J )
2481 if selfA._toStore("IndexOfOptimum") \
2482 or selfA._toStore("CurrentOptimum") \
2483 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2484 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2485 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2486 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2487 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2488 if selfA._toStore("IndexOfOptimum"):
2489 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2490 if selfA._toStore("CurrentOptimum"):
2491 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2492 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2493 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2494 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2495 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2496 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2497 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2498 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2499 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2500 if selfA._toStore("APosterioriCovariance"):
2501 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2502 if selfA._parameters["EstimationOf"] == "Parameters" \
2503 and J < previousJMinimum:
2504 previousJMinimum = J
2505 XaMin = Xa
2506 if selfA._toStore("APosterioriCovariance"):
2507 covarianceXaMin = Pn
2509 # Stockage final supplémentaire de l'optimum en estimation de paramètres
2510 # ----------------------------------------------------------------------
2511 if selfA._parameters["EstimationOf"] == "Parameters":
2512 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2513 selfA.StoredVariables["Analysis"].store( XaMin )
2514 if selfA._toStore("APosterioriCovariance"):
2515 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2516 if selfA._toStore("BMA"):
2517 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2521 # ==============================================================================
2522 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2531 Hm = HO["Direct"].appliedTo
2532 Ha = HO["Adjoint"].appliedInXTo
2534 # Utilisation éventuelle d'un vecteur H(Xb) précalculé
2535 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2536 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2537 else:
2538 HXb = Hm( Xb )
2539 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2540 if Y.size != HXb.size:
2541 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2542 if max(Y.shape) != max(HXb.shape):
2543 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2545 if selfA._toStore("JacobianMatrixAtBackground"):
2546 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2547 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2548 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2550 # Precompute the inverses of B and R
2551 BI = B.getI()
2552 RI = R.getI()
2554 # Point de démarrage de l'optimisation
2555 Xini = selfA._parameters["InitializationPoint"]
2557 # Définition de la fonction-coût
2558 # ------------------------------
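# For reference, the functions below implement the classical 3DVAR cost and gradient:
# J(x) = 1/2 (x-Xb)^T B^-1 (x-Xb) + 1/2 (Y-H(x))^T R^-1 (Y-H(x)) and
# GradJ(x) = B^-1 (x-Xb) - H'^T R^-1 (Y-H(x)), with H' the tangent/adjoint of H.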
2559 def CostFunction(x):
2560 _X = numpy.asmatrix(numpy.ravel( x )).T
2561 if selfA._parameters["StoreInternalVariables"] or \
2562 selfA._toStore("CurrentState") or \
2563 selfA._toStore("CurrentOptimum"):
2564 selfA.StoredVariables["CurrentState"].store( _X )
2565 _HX = Hm( _X )
2566 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2567 _Innovation = Y - _HX
2568 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2569 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2571 if selfA._toStore("InnovationAtCurrentState"):
2572 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2574 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2575 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2576 J = Jb + Jo
2578 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2579 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2580 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2581 selfA.StoredVariables["CostFunctionJ" ].store( J )
2582 if selfA._toStore("IndexOfOptimum") or \
2583 selfA._toStore("CurrentOptimum") or \
2584 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2585 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2586 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2587 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2588 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2589 if selfA._toStore("IndexOfOptimum"):
2590 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2591 if selfA._toStore("CurrentOptimum"):
2592 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2593 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2594 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2595 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2596 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2597 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2598 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2599 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2600 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2601 return J
2603 def GradientOfCostFunction(x):
2604 _X = numpy.asmatrix(numpy.ravel( x )).T
2605 _HX = Hm( _X )
2606 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2607 GradJb = BI * (_X - Xb)
2608 GradJo = - Ha( (_X, RI * (Y - _HX)) )
2609 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2610 return GradJ
2612 # Minimisation de la fonctionnelle
2613 # --------------------------------
2614 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2616 if selfA._parameters["Minimizer"] == "LBFGSB":
2617 if "0.19" <= scipy.version.version <= "1.1.0":
2618 import lbfgsbhlt as optimiseur
2619 else:
2620 import scipy.optimize as optimiseur
2621 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2622 func = CostFunction,
2624 fprime = GradientOfCostFunction,
2626 bounds = selfA._parameters["Bounds"],
2627 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2628 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2629 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2630 iprint = selfA._parameters["optiprint"],
2632 nfeval = Informations['funcalls']
2633 rc = Informations['warnflag']
2634 elif selfA._parameters["Minimizer"] == "TNC":
2635 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2636 func = CostFunction,
2638 fprime = GradientOfCostFunction,
2640 bounds = selfA._parameters["Bounds"],
2641 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2642 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2643 ftol = selfA._parameters["CostDecrementTolerance"],
2644 messages = selfA._parameters["optmessages"],
2646 elif selfA._parameters["Minimizer"] == "CG":
2647 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2650 fprime = GradientOfCostFunction,
2652 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2653 gtol = selfA._parameters["GradientNormTolerance"],
2654 disp = selfA._parameters["optdisp"],
2657 elif selfA._parameters["Minimizer"] == "NCG":
2658 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2661 fprime = GradientOfCostFunction,
2663 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2664 avextol = selfA._parameters["CostDecrementTolerance"],
2665 disp = selfA._parameters["optdisp"],
2668 elif selfA._parameters["Minimizer"] == "BFGS":
2669 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2672 fprime = GradientOfCostFunction,
2674 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2675 gtol = selfA._parameters["GradientNormTolerance"],
2676 disp = selfA._parameters["optdisp"],
2679 else:
2680 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2682 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2683 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2685 # Correction pour pallier a un bug de TNC sur le retour du Minimum
2686 # ----------------------------------------------------------------
2687 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2688 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2690 # Obtention de l'analyse
2691 # ----------------------
2692 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2694 selfA.StoredVariables["Analysis"].store( Xa )
2696 if selfA._toStore("OMA") or \
2697 selfA._toStore("SigmaObs2") or \
2698 selfA._toStore("SimulationQuantiles") or \
2699 selfA._toStore("SimulatedObservationAtOptimum"):
2700 if selfA._toStore("SimulatedObservationAtCurrentState"):
2701 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2702 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2703 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2707 # Calcul de la covariance d'analyse
2708 # ---------------------------------
2709 if selfA._toStore("APosterioriCovariance") or \
2710 selfA._toStore("SimulationQuantiles") or \
2711 selfA._toStore("JacobianMatrixAtOptimum") or \
2712 selfA._toStore("KalmanGainAtOptimum"):
2713 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2714 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2715 if selfA._toStore("APosterioriCovariance") or \
2716 selfA._toStore("SimulationQuantiles") or \
2717 selfA._toStore("KalmanGainAtOptimum"):
2718 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2719 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2720 if selfA._toStore("APosterioriCovariance") or \
2721 selfA._toStore("SimulationQuantiles"):
2722 HessienneI = []
2723 nb = Xa.size
2724 for i in range(nb):
2725 _ee = numpy.matrix(numpy.zeros(nb)).T
2726 _ee[i] = 1.
2727 _HtEE = numpy.dot(HtM,_ee)
2728 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2729 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2730 HessienneI = numpy.matrix( HessienneI )
2731 A = HessienneI.I
2732 if min(A.shape) != max(A.shape):
2733 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, but it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2734 if (numpy.diag(A) < 0).any():
2735 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2736 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2737 try:
2738 L = numpy.linalg.cholesky( A )
2739 except:
2740 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2741 if selfA._toStore("APosterioriCovariance"):
2742 selfA.StoredVariables["APosterioriCovariance"].store( A )
2743 if selfA._toStore("JacobianMatrixAtOptimum"):
2744 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2745 if selfA._toStore("KalmanGainAtOptimum"):
2746 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2747 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2748 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2750 # Calculs et/ou stockages supplémentaires
2751 # ---------------------------------------
2752 if selfA._toStore("Innovation") or \
2753 selfA._toStore("SigmaObs2") or \
2754 selfA._toStore("MahalanobisConsistency") or \
2755 selfA._toStore("OMB"):
2757 if selfA._toStore("Innovation"):
2758 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2759 if selfA._toStore("BMA"):
2760 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2761 if selfA._toStore("OMA"):
2762 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2763 if selfA._toStore("OMB"):
2764 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2765 if selfA._toStore("SigmaObs2"):
2766 TraceR = R.trace(Y.size)
2767 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2768 if selfA._toStore("MahalanobisConsistency"):
2769 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2770 if selfA._toStore("SimulationQuantiles"):
2771 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2772 if selfA._toStore("SimulatedObservationAtBackground"):
2773 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2774 if selfA._toStore("SimulatedObservationAtOptimum"):
2775 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2779 # ==============================================================================
2780 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2789 Hm = HO["Direct"].appliedControledFormTo
2790 Mm = EM["Direct"].appliedControledFormTo
2792 if CM is not None and "Tangent" in CM and U is not None:
2793 Cm = CM["Tangent"].asMatrix(Xb)
2797 def Un(_step):
2798 if U is not None:
2799 if hasattr(U,"store") and 1<=_step<len(U) :
2800 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2801 elif hasattr(U,"store") and len(U)==1:
2802 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2803 else:
2804 _Un = numpy.asmatrix(numpy.ravel( U )).T
2805 else:
2806 _Un = None
2807 return _Un
2808 def CmUn(_xn,_un):
2809 if Cm is not None and _un is not None: # Warning: if Cm is also inside M, it would be counted twice!
2810 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2811 _CmUn = _Cm * _un
2812 else:
2813 _CmUn = 0.
2814 return _CmUn
2816 # Remark: the observations are used starting from time step number 1, and are
2817 # taken from Yo indexed accordingly. Step 0 is therefore not used, since the
2818 # first assimilation step starts with the observation of step 1.
2821 # Nombre de pas identique au nombre de pas d'observations
2822 if hasattr(Y,"stepnumber"):
2823 duration = Y.stepnumber()
2827 # Précalcul des inversions de B et R
2831 # Point de démarrage de l'optimisation
2832 Xini = selfA._parameters["InitializationPoint"]
2834 # Définition de la fonction-coût
2835 # ------------------------------
2836 selfA.DirectCalculation = [None,] # Step 0 is not observed
2837 selfA.DirectInnovation = [None,] # Step 0 is not observed
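# For reference, the strong constraint 4DVAR cost evaluated below is
# J(x0) = 1/2 (x0-Xb)^T B^-1 (x0-Xb) + sum_k 1/2 d_k^T R^-1 d_k, with d_k = y_k - H(M_{0->k}(x0)).
# Its gradient is accumulated backwards in GradientOfCostFunction from the adjoints M^T and H^T,
# which is why every forward state and innovation is stored in DirectCalculation / DirectInnovation.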
2838 def CostFunction(x):
2839 _X = numpy.asmatrix(numpy.ravel( x )).T
2840 if selfA._parameters["StoreInternalVariables"] or \
2841 selfA._toStore("CurrentState") or \
2842 selfA._toStore("CurrentOptimum"):
2843 selfA.StoredVariables["CurrentState"].store( _X )
2844 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2845 selfA.DirectCalculation = [None,]
2846 selfA.DirectInnovation = [None,]
2847 Jo = 0.
2848 _Xn = _X
2849 for step in range(0,duration-1):
2850 if hasattr(Y,"store"):
2851 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2852 else:
2853 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2855 _Un = Un(step)
2857 if selfA._parameters["EstimationOf"] == "State":
2858 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2859 elif selfA._parameters["EstimationOf"] == "Parameters":
2860 pass
2862 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2863 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2864 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2866 # Etape de différence aux observations
2867 if selfA._parameters["EstimationOf"] == "State":
2868 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2869 elif selfA._parameters["EstimationOf"] == "Parameters":
2870 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2872 # Stockage de l'état
2873 selfA.DirectCalculation.append( _Xn )
2874 selfA.DirectInnovation.append( _YmHMX )
2876 # Ajout dans la fonctionnelle d'observation
2877 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2878 J = Jb + Jo
2880 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2881 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2882 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2883 selfA.StoredVariables["CostFunctionJ" ].store( J )
2884 if selfA._toStore("IndexOfOptimum") or \
2885 selfA._toStore("CurrentOptimum") or \
2886 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2887 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2888 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2889 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2890 if selfA._toStore("IndexOfOptimum"):
2891 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2892 if selfA._toStore("CurrentOptimum"):
2893 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2894 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2895 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2896 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2897 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2898 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2899 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2900 return J
2902 def GradientOfCostFunction(x):
2903 _X = numpy.asmatrix(numpy.ravel( x )).T
2904 GradJb = BI * (_X - Xb)
2905 GradJo = 0.
2906 for step in range(duration-1,0,-1):
2907 # Étape de récupération du dernier stockage de l'évolution
2908 _Xn = selfA.DirectCalculation.pop()
2909 # Étape de récupération du dernier stockage de l'innovation
2910 _YmHMX = selfA.DirectInnovation.pop()
2911 # Calcul des adjoints
2912 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2913 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2914 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2915 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2916 # Calcul du gradient par état adjoint
2917 GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
2918 GradJo = Ma * GradJo # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
2919 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2920 return GradJ
2922 # Minimisation de la fonctionnelle
2923 # --------------------------------
2924 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2926 if selfA._parameters["Minimizer"] == "LBFGSB":
2927 if "0.19" <= scipy.version.version <= "1.1.0":
2928 import lbfgsbhlt as optimiseur
2929 else:
2930 import scipy.optimize as optimiseur
2931 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2932 func = CostFunction,
2934 fprime = GradientOfCostFunction,
2936 bounds = selfA._parameters["Bounds"],
2937 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2938 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2939 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2940 iprint = selfA._parameters["optiprint"],
2942 nfeval = Informations['funcalls']
2943 rc = Informations['warnflag']
2944 elif selfA._parameters["Minimizer"] == "TNC":
2945 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2946 func = CostFunction,
2948 fprime = GradientOfCostFunction,
2950 bounds = selfA._parameters["Bounds"],
2951 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2952 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2953 ftol = selfA._parameters["CostDecrementTolerance"],
2954 messages = selfA._parameters["optmessages"],
2956 elif selfA._parameters["Minimizer"] == "CG":
2957 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2960 fprime = GradientOfCostFunction,
2962 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2963 gtol = selfA._parameters["GradientNormTolerance"],
2964 disp = selfA._parameters["optdisp"],
2967 elif selfA._parameters["Minimizer"] == "NCG":
2968 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2971 fprime = GradientOfCostFunction,
2973 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2974 avextol = selfA._parameters["CostDecrementTolerance"],
2975 disp = selfA._parameters["optdisp"],
2978 elif selfA._parameters["Minimizer"] == "BFGS":
2979 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2982 fprime = GradientOfCostFunction,
2984 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2985 gtol = selfA._parameters["GradientNormTolerance"],
2986 disp = selfA._parameters["optdisp"],
2990 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    # Initializations: operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precomputation of the inverses of B and R, and of the transpose of B
    BI = B.getI()
    RI = R.getI()
    BT = B.getT()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
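    #
    # The optimization is performed on the control variable v, with the state
    # defined as X = Xb + B*v, so the background term of the cost function
    # involves B itself and not its inverse (hence "no inversion of B").
    # The starting point Xini = 0 thus corresponds to the background Xb.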
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb = float( 0.5 * _V.T * BT * _V )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
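    #
    # The gradient with respect to the control variable v combines a background
    # term in v with the adjoint of the observation operator applied to the
    # R^{-1}-weighted innovation.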
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            iprint = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            bounds = selfA._parameters["Bounds"],
            maxfun = selfA._parameters["MaximumNumberOfSteps"],
            pgtol = selfA._parameters["ProjectedGradientTolerance"],
            ftol = selfA._parameters["CostDecrementTolerance"],
            messages = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            avextol = selfA._parameters["CostDecrementTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f = CostFunction,
            x0 = Xini,
            fprime = GradientOfCostFunction,
            args = (),
            maxiter = selfA._parameters["MaximumNumberOfSteps"],
            gtol = selfA._parameters["GradientNormTolerance"],
            disp = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
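    # The a posteriori covariance A is approximated as the inverse of the cost
    # function Hessian, assembled column by column from the tangent (HtM) and
    # adjoint (HaM) forms of the observation operator at the optimum: each
    # column is BI*e_i + HaM*(RI*(HtM*e_i)) for a canonical basis vector e_i.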
    # Computation of the analysis error covariance
    # --------------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE = numpy.dot(HtM,_ee)
            _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except Exception:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
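    # SigmaObs2 is the innovation-residual based estimate of the observation
    # error variance scale, d^T*(Y - H(Xa)) / Tr(R), and MahalanobisConsistency
    # is 2*MinJ/dim(Y); both rely on the background innovation d = Y - H(Xb).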
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')