1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les objets numériques génériques.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
51 # ==============================================================================
52 class FDApproximation(object):
54 Cette classe sert d'interface pour définir les opérateurs approximés. A la
55 création d'un objet, en fournissant une fonction "Function", on obtient un
56 objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57 "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58 multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59 "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60 centrées si le booléen "centeredDF" est vrai.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
123 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
137 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
170 Calcul du direct à l'aide de la fonction fournie.
172 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
173 ne doivent pas être données ici à la fonction utilisateur.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _X = numpy.asmatrix(numpy.ravel( X )).T
180 _HX = numpy.ravel(self.__userFunction( _X ))
184 # ---------------------------------------------------------
185 def TangentMatrix(self, X ):
187 Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
188 c'est-à-dire le gradient de H en X. On utilise des différences finies
189 directionnelles autour du point X. X est un numpy.matrix.
191 Différences finies centrées (approximation d'ordre 2):
192 1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
193 dX[i] à la composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
194 on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
196 2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
198 3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
200 Différences finies non centrées (approximation d'ordre 1):
201 1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
202 composante X[i] pour composer X_plus_dXi, et on calcule la réponse
203 HX_plus_dXi = H( X_plus_dXi )
204 2/ On calcule la valeur centrale HX = H(X)
205 3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
207 4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
210 logging.debug("FDA Début du calcul de la Jacobienne")
211 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
212 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
214 if X is None or len(X)==0:
215 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
217 _X = numpy.asmatrix(numpy.ravel( X )).T
219 if self.__dX is None:
220 _dX = self.__increment * _X
222 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
224 if (_dX == 0.).any():
227 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229 _dX = numpy.where( _dX == 0., moyenne, _dX )
231 __alreadyCalculated = False
233 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
234 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236 __alreadyCalculated, __i = True, __alreadyCalculatedP
237 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
239 if __alreadyCalculated:
240 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
241 _Jacobienne = self.__listJPCR[__i]
243 logging.debug("FDA Calcul Jacobienne (explicite)")
244 if self.__centeredDF:
246 if self.__mpEnabled and not self.__mfEnabled:
248 "__userFunction__path" : self.__userFunction__path,
249 "__userFunction__modl" : self.__userFunction__modl,
250 "__userFunction__name" : self.__userFunction__name,
253 for i in range( len(_dX) ):
255 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
256 _X_plus_dXi[i] = _X[i] + _dXi
257 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
258 _X_moins_dXi[i] = _X[i] - _dXi
260 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
261 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263 import multiprocessing
264 self.__pool = multiprocessing.Pool(self.__mpWorkers)
265 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
270 for i in range( len(_dX) ):
271 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273 elif self.__mfEnabled:
275 for i in range( len(_dX) ):
277 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
278 _X_plus_dXi[i] = _X[i] + _dXi
279 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
280 _X_moins_dXi[i] = _X[i] - _dXi
282 _xserie.append( _X_plus_dXi )
283 _xserie.append( _X_moins_dXi )
285 _HX_plusmoins_dX = self.DirectOperator( _xserie )
288 for i in range( len(_dX) ):
289 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
293 for i in range( _dX.size ):
295 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
296 _X_plus_dXi[i] = _X[i] + _dXi
297 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
298 _X_moins_dXi[i] = _X[i] - _dXi
300 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
301 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
303 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
307 if self.__mpEnabled and not self.__mfEnabled:
309 "__userFunction__path" : self.__userFunction__path,
310 "__userFunction__modl" : self.__userFunction__modl,
311 "__userFunction__name" : self.__userFunction__name,
314 _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315 for i in range( len(_dX) ):
316 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
317 _X_plus_dXi[i] = _X[i] + _dX[i]
319 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321 import multiprocessing
322 self.__pool = multiprocessing.Pool(self.__mpWorkers)
323 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
327 _HX = _HX_plus_dX.pop(0)
330 for i in range( len(_dX) ):
331 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333 elif self.__mfEnabled:
335 _xserie.append( _X.A1 )
336 for i in range( len(_dX) ):
337 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
338 _X_plus_dXi[i] = _X[i] + _dX[i]
340 _xserie.append( _X_plus_dXi )
342 _HX_plus_dX = self.DirectOperator( _xserie )
344 _HX = _HX_plus_dX.pop(0)
347 for i in range( len(_dX) ):
348 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
352 _HX = self.DirectOperator( _X )
353 for i in range( _dX.size ):
355 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
356 _X_plus_dXi[i] = _X[i] + _dXi
358 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
363 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
365 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366 while len(self.__listJPCP) > self.__lenghtRJ:
367 self.__listJPCP.pop(0)
368 self.__listJPCI.pop(0)
369 self.__listJPCR.pop(0)
370 self.__listJPPN.pop(0)
371 self.__listJPIN.pop(0)
372 self.__listJPCP.append( copy.copy(_X) )
373 self.__listJPCI.append( copy.copy(_dX) )
374 self.__listJPCR.append( copy.copy(_Jacobienne) )
375 self.__listJPPN.append( numpy.linalg.norm(_X) )
376 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
378 logging.debug("FDA Fin du calcul de la Jacobienne")
382 # ---------------------------------------------------------
383 def TangentOperator(self, paire, **extraArgs ):
385 Calcul du tangent à l'aide de la Jacobienne.
387 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
388 ne doivent pas être données ici à la fonction utilisateur.
391 assert len(paire) == 1, "Incorrect lenght of arguments"
393 assert len(_paire) == 2, "Incorrect number of arguments"
395 assert len(paire) == 2, "Incorrect number of arguments"
398 _Jacobienne = self.TangentMatrix( X )
399 if dX is None or len(dX) == 0:
401 # Calcul de la forme matricielle si le second argument est None
402 # -------------------------------------------------------------
403 if self.__mfEnabled: return [_Jacobienne,]
404 else: return _Jacobienne
407 # Calcul de la valeur linéarisée de H en X appliqué à dX
408 # ------------------------------------------------------
409 _dX = numpy.asmatrix(numpy.ravel( dX )).T
410 _HtX = numpy.dot(_Jacobienne, _dX)
411 if self.__mfEnabled: return [_HtX.A1,]
414 # ---------------------------------------------------------
415 def AdjointOperator(self, paire, **extraArgs ):
417 Calcul de l'adjoint à l'aide de la Jacobienne.
419 NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
420 ne doivent pas être données ici à la fonction utilisateur.
423 assert len(paire) == 1, "Incorrect lenght of arguments"
425 assert len(_paire) == 2, "Incorrect number of arguments"
427 assert len(paire) == 2, "Incorrect number of arguments"
430 _JacobienneT = self.TangentMatrix( X ).T
431 if Y is None or len(Y) == 0:
433 # Calcul de la forme matricielle si le second argument est None
434 # -------------------------------------------------------------
435 if self.__mfEnabled: return [_JacobienneT,]
436 else: return _JacobienneT
439 # Calcul de la valeur de l'adjoint en X appliqué à Y
440 # --------------------------------------------------
441 _Y = numpy.asmatrix(numpy.ravel( Y )).T
442 _HaY = numpy.dot(_JacobienneT, _Y)
443 if self.__mfEnabled: return [_HaY.A1,]
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
450 _bgcenter = numpy.ravel(_bgcenter)[:,None]
452 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
454 if _bgcovariance is None:
455 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460 return BackgroundEnsemble
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465 def __CenteredRandomAnomalies(Zr, N):
467 Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
468 notes manuscrites de MB et conforme au code de PS avec eps = -1
471 Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
478 _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481 if _bgcovariance is None:
482 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
485 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486 _nbctl = _bgcenter.size
487 if _nbmembers > _nbctl:
488 _Z = numpy.concatenate((numpy.dot(
489 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494 BackgroundEnsemble = _bgcenter + _Zca
496 if max(abs(_bgcovariance.flatten())) > 0:
497 _nbctl = _bgcenter.size
498 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500 BackgroundEnsemble = _bgcenter + _Zca
502 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504 return BackgroundEnsemble
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508 "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
510 __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
512 __Em = numpy.ravel(OptMean).reshape((-1,1))
514 return Normalisation * (numpy.asarray(Ensemble) - __Em)
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble, __quick = False ):
518 "Renvoie l'estimation empirique de la covariance d'ensemble"
520 # Covariance rapide mais rarement définie positive
521 __Covariance = numpy.cov(Ensemble)
523 # Résultat souvent identique à numpy.cov, mais plus robuste
524 __n, __m = numpy.asarray(Ensemble).shape
525 __Anomalies = EnsembleOfAnomalies( Ensemble )
526 # Estimation empirique
527 __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
529 __Covariance = (__Covariance + __Covariance.T) * 0.5
530 # Assure la positivité
531 __epsilon = mpr*numpy.trace(__Covariance)
532 __Covariance = __Covariance + __epsilon * numpy.identity(__n)
536 # ==============================================================================
537 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
538 "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
539 if hasattr(__Covariance,"assparsematrix"):
540 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
541 # Traitement d'une covariance nulle ou presque
543 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
544 # Traitement d'une covariance nulle ou presque
547 if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
548 # Traitement d'une covariance nulle ou presque
550 if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
551 # Traitement d'une covariance nulle ou presque
554 __n, __m = __Ensemble.shape
555 if __Seed is not None: numpy.random.seed(__Seed)
557 if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
558 # Traitement d'une covariance multiple de l'identité
560 __std = numpy.sqrt(__Covariance.assparsematrix())
561 __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
563 elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
564 # Traitement d'une covariance diagonale avec variances non identiques
565 __zero = numpy.zeros(__n)
566 __std = numpy.sqrt(__Covariance.assparsematrix())
567 __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
569 elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
570 # Traitement d'une covariance pleine
571 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
573 elif isinstance(__Covariance, numpy.ndarray):
574 # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
575 __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
578 raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
582 # ==============================================================================
583 def CovarianceInflation(
585 InflationType = None,
586 InflationFactor = None,
587 BackgroundCov = None,
590 Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
592 Synthèse : Hunt 2007, section 2.3.5
594 if InflationFactor is None:
597 InflationFactor = float(InflationFactor)
599 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
600 if InflationFactor < 1.:
601 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
602 if InflationFactor < 1.+mpr:
604 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
606 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
607 if InflationFactor < 1.:
608 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
609 if InflationFactor < 1.+mpr:
611 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
612 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
613 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
615 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
616 if InflationFactor < 0.:
617 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
618 if InflationFactor < mpr:
620 __n, __m = numpy.asarray(InputCovOrEns).shape
622 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
623 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
625 elif InflationType == "HybridOnBackgroundCovariance":
626 if InflationFactor < 0.:
627 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
628 if InflationFactor < mpr:
630 __n, __m = numpy.asarray(InputCovOrEns).shape
632 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
633 if BackgroundCov is None:
634 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
635 if InputCovOrEns.shape != BackgroundCov.shape:
636 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
637 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
639 elif InflationType == "Relaxation":
640 raise NotImplementedError("InflationType Relaxation")
643 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
645 return OutputCovOrEns
647 # ==============================================================================
648 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
649 "Estimation des quantiles a posteriori (selfA est modifié)"
650 nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
652 # Traitement des bornes
653 if "QBounds" in selfA._parameters: LBounds = selfA._parameters["QBounds"] # Prioritaire
654 else: LBounds = selfA._parameters["Bounds"] # Défaut raisonnable
655 if LBounds is not None:
656 def NoneRemove(paire):
658 if bmin is None: bmin = numpy.finfo('float').min
659 if bmax is None: bmax = numpy.finfo('float').max
661 LBounds = numpy.matrix( [NoneRemove(paire) for paire in LBounds] )
663 # Échantillonnage des états
666 if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
667 HXa = numpy.matrix(numpy.ravel( HXa )).T
668 for i in range(nbsamples):
669 if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
670 dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
671 if LBounds is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
672 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
673 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
674 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
676 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
677 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
678 Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
679 if LBounds is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
680 Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
681 Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
682 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
684 raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
688 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
690 YfQ = numpy.hstack((YfQ,Yr))
691 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
693 # Extraction des quantiles
696 for quantile in selfA._parameters["Quantiles"]:
697 if not (0. <= float(quantile) <= 1.): continue
698 indice = int(nbsamples * float(quantile) - 1./nbsamples)
699 if YQ is None: YQ = YfQ[:,indice]
700 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
701 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
702 if selfA._toStore("SampledStateForQuantiles"):
703 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
707 # ==============================================================================
708 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
714 H = HO["Direct"].appliedControledFormTo
716 if selfA._parameters["EstimationOf"] == "State":
717 M = EM["Direct"].appliedControledFormTo
719 if CM is not None and "Tangent" in CM and U is not None:
720 Cm = CM["Tangent"].asMatrix(Xb)
724 # Précalcul des inversions de B et R
727 # Durée d'observation et tailles
728 LagL = selfA._parameters["SmootherLagL"]
729 if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
730 raise ValueError("Fixed-lag smoother requires a series of observation")
731 if Y.stepnumber() < LagL:
732 raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
733 duration = Y.stepnumber()
734 __p = numpy.cumprod(Y.shape())[-1]
736 __m = selfA._parameters["NumberOfMembers"]
738 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
740 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
741 selfA.StoredVariables["Analysis"].store( Xb )
742 if selfA._toStore("APosterioriCovariance"):
743 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
746 # Calcul direct initial (on privilégie la mémorisation au recalcul)
747 __seed = numpy.random.get_state()
748 selfB = copy.deepcopy(selfA)
749 selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
750 if VariantM == "EnKS16-KalmanFilterFormula":
751 etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
753 raise ValueError("VariantM has to be chosen in the authorized methods list.")
755 EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
757 EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
758 selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
760 for step in range(LagL,duration-1):
762 sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
765 if hasattr(Y,"store"):
766 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
768 Ynpu = numpy.ravel( Y ).reshape((__p,1))
771 if hasattr(U,"store") and len(U)>1:
772 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
773 elif hasattr(U,"store") and len(U)==1:
774 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
776 Un = numpy.asmatrix(numpy.ravel( U )).T
780 #--------------------------
781 if VariantM == "EnKS16-KalmanFilterFormula":
782 if selfA._parameters["EstimationOf"] == "State": # Forecast
783 EL = M( [(EL[:,i], Un) for i in range(__m)],
785 returnSerieAsArrayMatrix = True )
786 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
787 EZ = H( [(EL[:,i], Un) for i in range(__m)],
789 returnSerieAsArrayMatrix = True )
790 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
791 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
793 elif selfA._parameters["EstimationOf"] == "Parameters":
794 # --- > Par principe, M = Id, Q = 0
795 EZ = H( [(EL[:,i], Un) for i in range(__m)],
797 returnSerieAsArrayMatrix = True )
799 vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
800 vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
802 mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
803 delta = RIdemi @ ( Ynpu - vZm )
804 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
805 vw = mT @ mS.T @ delta
807 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
808 mU = numpy.identity(__m)
809 wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
811 EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
815 for irl in range(LagL): # Lissage des L précédentes analysis
816 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
817 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
818 sEL[irl] = vEm + EX @ wTU
820 # Conservation de l'analyse retrospective d'ordre 0 avant rotation
821 Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
822 if selfA._toStore("APosterioriCovariance"):
825 for irl in range(LagL):
826 sEL[irl] = sEL[irl+1]
828 #--------------------------
830 raise ValueError("VariantM has to be chosen in the authorized methods list.")
832 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
834 selfA.StoredVariables["Analysis"].store( Xa )
835 if selfA._toStore("APosterioriCovariance"):
836 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
838 # Stockage des dernières analyses incomplètement remises à jour
839 for irl in range(LagL):
840 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
841 Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
842 selfA.StoredVariables["Analysis"].store( Xa )
846 # ==============================================================================
847 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
849 Ensemble-Transform EnKF
851 if selfA._parameters["EstimationOf"] == "Parameters":
852 selfA._parameters["StoreInternalVariables"] = True
856 H = HO["Direct"].appliedControledFormTo
858 if selfA._parameters["EstimationOf"] == "State":
859 M = EM["Direct"].appliedControledFormTo
861 if CM is not None and "Tangent" in CM and U is not None:
862 Cm = CM["Tangent"].asMatrix(Xb)
866 # Nombre de pas identique au nombre de pas d'observations
867 # -------------------------------------------------------
868 if hasattr(Y,"stepnumber"):
869 duration = Y.stepnumber()
870 __p = numpy.cumprod(Y.shape())[-1]
873 __p = numpy.array(Y).size
875 # Précalcul des inversions de B et R
876 # ----------------------------------
877 if selfA._parameters["StoreInternalVariables"] \
878 or selfA._toStore("CostFunctionJ") \
879 or selfA._toStore("CostFunctionJb") \
880 or selfA._toStore("CostFunctionJo") \
881 or selfA._toStore("CurrentOptimum") \
882 or selfA._toStore("APosterioriCovariance"):
885 elif VariantM != "KalmanFilterFormula":
887 if VariantM == "KalmanFilterFormula":
893 __m = selfA._parameters["NumberOfMembers"]
894 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
896 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
897 #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
899 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
900 selfA.StoredVariables["Analysis"].store( Xb )
901 if selfA._toStore("APosterioriCovariance"):
902 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
905 previousJMinimum = numpy.finfo(float).max
907 for step in range(duration-1):
908 if hasattr(Y,"store"):
909 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
911 Ynpu = numpy.ravel( Y ).reshape((__p,1))
914 if hasattr(U,"store") and len(U)>1:
915 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
916 elif hasattr(U,"store") and len(U)==1:
917 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
919 Un = numpy.asmatrix(numpy.ravel( U )).T
923 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
924 Xn = CovarianceInflation( Xn,
925 selfA._parameters["InflationType"],
926 selfA._parameters["InflationFactor"],
929 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
930 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
932 returnSerieAsArrayMatrix = True )
933 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
934 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
936 returnSerieAsArrayMatrix = True )
937 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
938 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
939 Xn_predicted = Xn_predicted + Cm * Un
940 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
941 # --- > Par principe, M = Id, Q = 0
943 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
945 returnSerieAsArrayMatrix = True )
947 # Mean of forecast and observation of forecast
948 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
949 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
952 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
953 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
955 #--------------------------
956 if VariantM == "KalmanFilterFormula":
957 mS = RIdemi * EaHX / math.sqrt(__m-1)
958 delta = RIdemi * ( Ynpu - Hfm )
959 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
960 vw = mT @ mS.T @ delta
962 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
963 mU = numpy.identity(__m)
965 EaX = EaX / math.sqrt(__m-1)
966 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
967 #--------------------------
968 elif VariantM == "Variational":
969 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
971 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
972 _Jo = 0.5 * _A.T @ (RI * _A)
973 _Jb = 0.5 * (__m-1) * w.T @ w
976 def GradientOfCostFunction(w):
977 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
978 _GardJo = - EaHX.T @ (RI * _A)
979 _GradJb = (__m-1) * w.reshape((__m,1))
980 _GradJ = _GardJo + _GradJb
981 return numpy.ravel(_GradJ)
982 vw = scipy.optimize.fmin_cg(
984 x0 = numpy.zeros(__m),
985 fprime = GradientOfCostFunction,
990 Hto = EaHX.T @ (RI * EaHX)
991 Htb = (__m-1) * numpy.identity(__m)
994 Pta = numpy.linalg.inv( Hta )
995 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
997 Xn = Xfm + EaX @ (vw[:,None] + EWa)
998 #--------------------------
999 elif VariantM == "FiniteSize11": # Jauge Boc2011
1000 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1001 def CostFunction(w):
1002 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1003 _Jo = 0.5 * _A.T @ (RI * _A)
1004 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1007 def GradientOfCostFunction(w):
1008 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1009 _GardJo = - EaHX.T @ (RI * _A)
1010 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1011 _GradJ = _GardJo + _GradJb
1012 return numpy.ravel(_GradJ)
1013 vw = scipy.optimize.fmin_cg(
1015 x0 = numpy.zeros(__m),
1016 fprime = GradientOfCostFunction,
1021 Hto = EaHX.T @ (RI * EaHX)
1023 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1024 / (1 + 1/__m + vw.T @ vw)**2
1027 Pta = numpy.linalg.inv( Hta )
1028 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1030 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1031 #--------------------------
1032 elif VariantM == "FiniteSize15": # Jauge Boc2015
1033 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1034 def CostFunction(w):
1035 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1036 _Jo = 0.5 * _A.T * RI * _A
1037 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1040 def GradientOfCostFunction(w):
1041 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1042 _GardJo = - EaHX.T @ (RI * _A)
1043 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1044 _GradJ = _GardJo + _GradJb
1045 return numpy.ravel(_GradJ)
1046 vw = scipy.optimize.fmin_cg(
1048 x0 = numpy.zeros(__m),
1049 fprime = GradientOfCostFunction,
1054 Hto = EaHX.T @ (RI * EaHX)
1056 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1057 / (1 + 1/__m + vw.T @ vw)**2
1060 Pta = numpy.linalg.inv( Hta )
1061 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1063 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1064 #--------------------------
1065 elif VariantM == "FiniteSize16": # Jauge Boc2016
1066 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1067 def CostFunction(w):
1068 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1069 _Jo = 0.5 * _A.T @ (RI * _A)
1070 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1073 def GradientOfCostFunction(w):
1074 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1075 _GardJo = - EaHX.T @ (RI * _A)
1076 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1077 _GradJ = _GardJo + _GradJb
1078 return numpy.ravel(_GradJ)
1079 vw = scipy.optimize.fmin_cg(
1081 x0 = numpy.zeros(__m),
1082 fprime = GradientOfCostFunction,
1087 Hto = EaHX.T @ (RI * EaHX)
1088 Htb = ((__m+1) / (__m-1)) * \
1089 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1090 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1093 Pta = numpy.linalg.inv( Hta )
1094 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1096 Xn = Xfm + EaX @ (vw[:,None] + EWa)
1097 #--------------------------
1099 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1101 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1102 Xn = CovarianceInflation( Xn,
1103 selfA._parameters["InflationType"],
1104 selfA._parameters["InflationFactor"],
1107 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1108 #--------------------------
1110 if selfA._parameters["StoreInternalVariables"] \
1111 or selfA._toStore("CostFunctionJ") \
1112 or selfA._toStore("CostFunctionJb") \
1113 or selfA._toStore("CostFunctionJo") \
1114 or selfA._toStore("APosterioriCovariance") \
1115 or selfA._toStore("InnovationAtCurrentAnalysis") \
1116 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1117 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1118 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1119 _Innovation = Ynpu - _HXa
1121 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1122 # ---> avec analysis
1123 selfA.StoredVariables["Analysis"].store( Xa )
1124 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1125 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1126 if selfA._toStore("InnovationAtCurrentAnalysis"):
1127 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1128 # ---> avec current state
1129 if selfA._parameters["StoreInternalVariables"] \
1130 or selfA._toStore("CurrentState"):
1131 selfA.StoredVariables["CurrentState"].store( Xn )
1132 if selfA._toStore("ForecastState"):
1133 selfA.StoredVariables["ForecastState"].store( EMX )
1134 if selfA._toStore("BMA"):
1135 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1136 if selfA._toStore("InnovationAtCurrentState"):
1137 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1138 if selfA._toStore("SimulatedObservationAtCurrentState") \
1139 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1140 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1142 if selfA._parameters["StoreInternalVariables"] \
1143 or selfA._toStore("CostFunctionJ") \
1144 or selfA._toStore("CostFunctionJb") \
1145 or selfA._toStore("CostFunctionJo") \
1146 or selfA._toStore("CurrentOptimum") \
1147 or selfA._toStore("APosterioriCovariance"):
1148 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1149 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1151 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1152 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1153 selfA.StoredVariables["CostFunctionJ" ].store( J )
1155 if selfA._toStore("IndexOfOptimum") \
1156 or selfA._toStore("CurrentOptimum") \
1157 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1158 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1159 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1160 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1161 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1162 if selfA._toStore("IndexOfOptimum"):
1163 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1164 if selfA._toStore("CurrentOptimum"):
1165 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1166 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1167 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1168 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1169 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1170 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1171 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1172 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1173 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1174 if selfA._toStore("APosterioriCovariance"):
1175 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1176 if selfA._parameters["EstimationOf"] == "Parameters" \
1177 and J < previousJMinimum:
1178 previousJMinimum = J
1180 if selfA._toStore("APosterioriCovariance"):
1181 covarianceXaMin = Pn
1182 # ---> Pour les smoothers
1183 if selfA._toStore("CurrentEnsembleState"):
1184 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1186 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1187 # ----------------------------------------------------------------------
1188 if selfA._parameters["EstimationOf"] == "Parameters":
1189 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1190 selfA.StoredVariables["Analysis"].store( XaMin )
1191 if selfA._toStore("APosterioriCovariance"):
1192 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1193 if selfA._toStore("BMA"):
1194 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1198 # ==============================================================================
1199 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1200 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1204 if selfA._parameters["EstimationOf"] == "Parameters":
1205 selfA._parameters["StoreInternalVariables"] = True
1209 H = HO["Direct"].appliedControledFormTo
1211 if selfA._parameters["EstimationOf"] == "State":
1212 M = EM["Direct"].appliedControledFormTo
1214 if CM is not None and "Tangent" in CM and U is not None:
1215 Cm = CM["Tangent"].asMatrix(Xb)
1219 # Nombre de pas identique au nombre de pas d'observations
1220 # -------------------------------------------------------
1221 if hasattr(Y,"stepnumber"):
1222 duration = Y.stepnumber()
1223 __p = numpy.cumprod(Y.shape())[-1]
1226 __p = numpy.array(Y).size
1228 # Précalcul des inversions de B et R
1229 # ----------------------------------
1230 if selfA._parameters["StoreInternalVariables"] \
1231 or selfA._toStore("CostFunctionJ") \
1232 or selfA._toStore("CostFunctionJb") \
1233 or selfA._toStore("CostFunctionJo") \
1234 or selfA._toStore("CurrentOptimum") \
1235 or selfA._toStore("APosterioriCovariance"):
1242 __m = selfA._parameters["NumberOfMembers"]
1243 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1245 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1247 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1249 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1251 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1252 selfA.StoredVariables["Analysis"].store( Xb )
1253 if selfA._toStore("APosterioriCovariance"):
1254 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1257 previousJMinimum = numpy.finfo(float).max
1259 for step in range(duration-1):
1260 if hasattr(Y,"store"):
1261 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1263 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1266 if hasattr(U,"store") and len(U)>1:
1267 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1268 elif hasattr(U,"store") and len(U)==1:
1269 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1271 Un = numpy.asmatrix(numpy.ravel( U )).T
1275 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1276 Xn = CovarianceInflation( Xn,
1277 selfA._parameters["InflationType"],
1278 selfA._parameters["InflationFactor"],
1281 #--------------------------
1282 if VariantM == "IEnKF12":
1283 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1284 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1288 Ta = numpy.identity(__m)
1289 vw = numpy.zeros(__m)
1290 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1291 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1294 E1 = vx1 + _epsilon * EaX
1296 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1298 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1299 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1301 returnSerieAsArrayMatrix = True )
1302 elif selfA._parameters["EstimationOf"] == "Parameters":
1303 # --- > Par principe, M = Id
1305 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1306 vy1 = H((vx2, Un)).reshape((__p,1))
1308 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1310 returnSerieAsArrayMatrix = True )
1311 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1314 EaY = (HE2 - vy2) / _epsilon
1316 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1318 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1319 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1320 Deltaw = - numpy.linalg.solve(mH,GradJ)
1325 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1329 A2 = EnsembleOfAnomalies( E2 )
1332 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1333 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
1336 #--------------------------
1338 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1340 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1341 Xn = CovarianceInflation( Xn,
1342 selfA._parameters["InflationType"],
1343 selfA._parameters["InflationFactor"],
1346 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1347 #--------------------------
1349 if selfA._parameters["StoreInternalVariables"] \
1350 or selfA._toStore("CostFunctionJ") \
1351 or selfA._toStore("CostFunctionJb") \
1352 or selfA._toStore("CostFunctionJo") \
1353 or selfA._toStore("APosterioriCovariance") \
1354 or selfA._toStore("InnovationAtCurrentAnalysis") \
1355 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1356 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1357 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1358 _Innovation = Ynpu - _HXa
1360 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1361 # ---> avec analysis
1362 selfA.StoredVariables["Analysis"].store( Xa )
1363 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1364 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1365 if selfA._toStore("InnovationAtCurrentAnalysis"):
1366 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1367 # ---> avec current state
1368 if selfA._parameters["StoreInternalVariables"] \
1369 or selfA._toStore("CurrentState"):
1370 selfA.StoredVariables["CurrentState"].store( Xn )
1371 if selfA._toStore("ForecastState"):
1372 selfA.StoredVariables["ForecastState"].store( E2 )
1373 if selfA._toStore("BMA"):
1374 selfA.StoredVariables["BMA"].store( E2 - Xa )
1375 if selfA._toStore("InnovationAtCurrentState"):
1376 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1377 if selfA._toStore("SimulatedObservationAtCurrentState") \
1378 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1379 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1381 if selfA._parameters["StoreInternalVariables"] \
1382 or selfA._toStore("CostFunctionJ") \
1383 or selfA._toStore("CostFunctionJb") \
1384 or selfA._toStore("CostFunctionJo") \
1385 or selfA._toStore("CurrentOptimum") \
1386 or selfA._toStore("APosterioriCovariance"):
1387 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1388 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1390 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1391 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1392 selfA.StoredVariables["CostFunctionJ" ].store( J )
1394 if selfA._toStore("IndexOfOptimum") \
1395 or selfA._toStore("CurrentOptimum") \
1396 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1397 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1398 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1399 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1400 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1401 if selfA._toStore("IndexOfOptimum"):
1402 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1403 if selfA._toStore("CurrentOptimum"):
1404 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1405 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1406 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1407 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1408 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1409 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1410 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1411 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1412 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1413 if selfA._toStore("APosterioriCovariance"):
1414 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1415 if selfA._parameters["EstimationOf"] == "Parameters" \
1416 and J < previousJMinimum:
1417 previousJMinimum = J
1419 if selfA._toStore("APosterioriCovariance"):
1420 covarianceXaMin = Pn
1422 # Stockage final supplémentaire de l'optimum en estimation de paramètres
1423 # ----------------------------------------------------------------------
1424 if selfA._parameters["EstimationOf"] == "Parameters":
1425 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1426 selfA.StoredVariables["Analysis"].store( XaMin )
1427 if selfA._toStore("APosterioriCovariance"):
1428 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1429 if selfA._toStore("BMA"):
1430 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1434 # ==============================================================================
1435 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1443 # Opérateur non-linéaire pour la boucle externe
1444 Hm = HO["Direct"].appliedTo
1446 # Précalcul des inversions de B et R
1450 # Point de démarrage de l'optimisation
1451 Xini = selfA._parameters["InitializationPoint"]
1453 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1454 Innovation = Y - HXb
1461 Xr = Xini.reshape((-1,1))
1462 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1466 Ht = HO["Tangent"].asMatrix(Xr)
1467 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1469 # Définition de la fonction-coût
1470 # ------------------------------
1471 def CostFunction(dx):
1472 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1473 if selfA._parameters["StoreInternalVariables"] or \
1474 selfA._toStore("CurrentState") or \
1475 selfA._toStore("CurrentOptimum"):
1476 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1478 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1479 _dInnovation = Innovation - _HdX
1480 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1481 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1482 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1483 if selfA._toStore("InnovationAtCurrentState"):
1484 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1486 Jb = float( 0.5 * _dX.T * BI * _dX )
1487 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1490 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1491 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1492 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1493 selfA.StoredVariables["CostFunctionJ" ].store( J )
1494 if selfA._toStore("IndexOfOptimum") or \
1495 selfA._toStore("CurrentOptimum") or \
1496 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1497 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1498 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1499 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1500 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1501 if selfA._toStore("IndexOfOptimum"):
1502 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1503 if selfA._toStore("CurrentOptimum"):
1504 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1505 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1506 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1507 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1508 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1509 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1510 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1511 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1512 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1515 def GradientOfCostFunction(dx):
1516 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1518 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1519 _dInnovation = Innovation - _HdX
1521 GradJo = - Ht.T @ (RI * _dInnovation)
1522 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1525 # Minimisation de la fonctionnelle
1526 # --------------------------------
1527 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1529 if selfA._parameters["Minimizer"] == "LBFGSB":
1530 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1531 if "0.19" <= scipy.version.version <= "1.1.0":
1532 import lbfgsbhlt as optimiseur
1534 import scipy.optimize as optimiseur
1535 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1536 func = CostFunction,
1537 x0 = numpy.zeros(Xini.size),
1538 fprime = GradientOfCostFunction,
1540 bounds = selfA._parameters["Bounds"],
1541 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1542 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1543 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1544 iprint = selfA._parameters["optiprint"],
1546 nfeval = Informations['funcalls']
1547 rc = Informations['warnflag']
1548 elif selfA._parameters["Minimizer"] == "TNC":
1549 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1550 func = CostFunction,
1551 x0 = numpy.zeros(Xini.size),
1552 fprime = GradientOfCostFunction,
1554 bounds = selfA._parameters["Bounds"],
1555 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1556 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1557 ftol = selfA._parameters["CostDecrementTolerance"],
1558 messages = selfA._parameters["optmessages"],
1560 elif selfA._parameters["Minimizer"] == "CG":
1561 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1563 x0 = numpy.zeros(Xini.size),
1564 fprime = GradientOfCostFunction,
1566 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1567 gtol = selfA._parameters["GradientNormTolerance"],
1568 disp = selfA._parameters["optdisp"],
1571 elif selfA._parameters["Minimizer"] == "NCG":
1572 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1574 x0 = numpy.zeros(Xini.size),
1575 fprime = GradientOfCostFunction,
1577 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1578 avextol = selfA._parameters["CostDecrementTolerance"],
1579 disp = selfA._parameters["optdisp"],
1582 elif selfA._parameters["Minimizer"] == "BFGS":
1583 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1585 x0 = numpy.zeros(Xini.size),
1586 fprime = GradientOfCostFunction,
1588 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1589 gtol = selfA._parameters["GradientNormTolerance"],
1590 disp = selfA._parameters["optdisp"],
1594 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1596 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1597 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1599 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1600 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1601 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1603 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1606 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1607 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1609 # Retrieval of the analysis
1610 # ----------------------
1613 selfA.StoredVariables["Analysis"].store( Xa )
1615 if selfA._toStore("OMA") or \
1616 selfA._toStore("SigmaObs2") or \
1617 selfA._toStore("SimulationQuantiles") or \
1618 selfA._toStore("SimulatedObservationAtOptimum"):
1619 if selfA._toStore("SimulatedObservationAtCurrentState"):
1620 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1621 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1622 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1626 # Computation of the analysis covariance
1627 # ---------------------------------
1628 if selfA._toStore("APosterioriCovariance") or \
1629 selfA._toStore("SimulationQuantiles") or \
1630 selfA._toStore("JacobianMatrixAtOptimum") or \
1631 selfA._toStore("KalmanGainAtOptimum"):
1632 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1633 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1634 if selfA._toStore("APosterioriCovariance") or \
1635 selfA._toStore("SimulationQuantiles") or \
1636 selfA._toStore("KalmanGainAtOptimum"):
1637 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1638 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1639 if selfA._toStore("APosterioriCovariance") or \
1640 selfA._toStore("SimulationQuantiles"):
1644 _ee = numpy.matrix(numpy.zeros(nb)).T
1646 _HtEE = numpy.dot(HtM,_ee)
1647 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1648 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1649 HessienneI = numpy.matrix( HessienneI )
1651 if min(A.shape) != max(A.shape):
1652 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1653 if (numpy.diag(A) < 0).any():
1654 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1655 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1657 L = numpy.linalg.cholesky( A )
1659 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1660 if selfA._toStore("APosterioriCovariance"):
1661 selfA.StoredVariables["APosterioriCovariance"].store( A )
1662 if selfA._toStore("JacobianMatrixAtOptimum"):
1663 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1664 if selfA._toStore("KalmanGainAtOptimum"):
1665 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1666 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1667 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1669 # Additional computations and/or storage
1670 # ---------------------------------------
1671 if selfA._toStore("Innovation") or \
1672 selfA._toStore("SigmaObs2") or \
1673 selfA._toStore("MahalanobisConsistency") or \
1674 selfA._toStore("OMB"):
1676 if selfA._toStore("Innovation"):
1677 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1678 if selfA._toStore("BMA"):
1679 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1680 if selfA._toStore("OMA"):
1681 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1682 if selfA._toStore("OMB"):
1683 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1684 if selfA._toStore("SigmaObs2"):
1685 TraceR = R.trace(Y.size)
1686 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1687 if selfA._toStore("MahalanobisConsistency"):
1688 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1689 if selfA._toStore("SimulationQuantiles"):
1690 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
1691 if selfA._toStore("SimulatedObservationAtBackground"):
1692 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1693 if selfA._toStore("SimulatedObservationAtOptimum"):
1694 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
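# Illustrative sketch (not called by the algorithms of this module): the inner
# quadratic cost function of the incremental 3DVAR above, and its gradient,
# written with plain dense numpy arrays. The argument names (BI = B^{-1},
# RI = R^{-1}, Ht = tangent of H, d = Y - H(Xb)) mirror the variables used in
# CostFunction/GradientOfCostFunction, but this helper is only an explanatory
# rewriting under those assumptions, not part of the ADAO API.
def _sketchIncrementalCostAndGradient( dx, BI, RI, Ht, d ):
    dx = numpy.ravel( dx ).reshape((-1,1))                 # increment with respect to Xb
    dInnovation = numpy.ravel( d ).reshape((-1,1)) - Ht @ dx
    Jb = 0.5 * float( dx.T @ BI @ dx )                     # background term
    Jo = 0.5 * float( dInnovation.T @ RI @ dInnovation )   # observation term
    GradJ = numpy.ravel( BI @ dx ) - numpy.ravel( Ht.T @ (RI @ dInnovation) )
    return Jb + Jo, GradJ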
1698 # ==============================================================================
1699 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1700 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1702 Maximum Likelihood Ensemble Filter
1704 if selfA._parameters["EstimationOf"] == "Parameters":
1705 selfA._parameters["StoreInternalVariables"] = True
1709 H = HO["Direct"].appliedControledFormTo
1711 if selfA._parameters["EstimationOf"] == "State":
1712 M = EM["Direct"].appliedControledFormTo
1714 if CM is not None and "Tangent" in CM and U is not None:
1715 Cm = CM["Tangent"].asMatrix(Xb)
1719 # Number of steps identical to the number of observation steps
1720 # -------------------------------------------------------
1721 if hasattr(Y,"stepnumber"):
1722 duration = Y.stepnumber()
1723 __p = numpy.cumprod(Y.shape())[-1]
1726 __p = numpy.array(Y).size
1728 # Precomputation of the inverses of B and R
1729 # ----------------------------------
1730 if selfA._parameters["StoreInternalVariables"] \
1731 or selfA._toStore("CostFunctionJ") \
1732 or selfA._toStore("CostFunctionJb") \
1733 or selfA._toStore("CostFunctionJo") \
1734 or selfA._toStore("CurrentOptimum") \
1735 or selfA._toStore("APosterioriCovariance"):
1742 __m = selfA._parameters["NumberOfMembers"]
1743 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1745 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1747 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1749 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1750 selfA.StoredVariables["Analysis"].store( Xb )
1751 if selfA._toStore("APosterioriCovariance"):
1752 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1755 previousJMinimum = numpy.finfo(float).max
1757 for step in range(duration-1):
1758 if hasattr(Y,"store"):
1759 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1761 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1764 if hasattr(U,"store") and len(U)>1:
1765 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1766 elif hasattr(U,"store") and len(U)==1:
1767 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1769 Un = numpy.asmatrix(numpy.ravel( U )).T
1773 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1774 Xn = CovarianceInflation( Xn,
1775 selfA._parameters["InflationType"],
1776 selfA._parameters["InflationFactor"],
1779 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1780 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1782 returnSerieAsArrayMatrix = True )
1783 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1784 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
1785 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1786 Xn_predicted = Xn_predicted + Cm * Un
1787 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1788 # --- > By principle, M = Id, Q = 0
1791 #--------------------------
1792 if VariantM == "MLEF13":
1793 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1794 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1795 Ua = numpy.identity(__m)
1799 Ta = numpy.identity(__m)
1800 vw = numpy.zeros(__m)
1801 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1802 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1805 E1 = vx1 + _epsilon * EaX
1807 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1809 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1811 returnSerieAsArrayMatrix = True )
1812 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1815 EaY = (HE2 - vy2) / _epsilon
1817 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1819 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1820 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1821 Deltaw = - numpy.linalg.solve(mH,GradJ)
1826 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1831 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1833 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1834 #--------------------------
1836 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1838 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1839 Xn = CovarianceInflation( Xn,
1840 selfA._parameters["InflationType"],
1841 selfA._parameters["InflationFactor"],
1844 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1845 #--------------------------
1847 if selfA._parameters["StoreInternalVariables"] \
1848 or selfA._toStore("CostFunctionJ") \
1849 or selfA._toStore("CostFunctionJb") \
1850 or selfA._toStore("CostFunctionJo") \
1851 or selfA._toStore("APosterioriCovariance") \
1852 or selfA._toStore("InnovationAtCurrentAnalysis") \
1853 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1854 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1855 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1856 _Innovation = Ynpu - _HXa
1858 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1859 # ---> with the analysis
1860 selfA.StoredVariables["Analysis"].store( Xa )
1861 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1862 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1863 if selfA._toStore("InnovationAtCurrentAnalysis"):
1864 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1865 # ---> with the current state
1866 if selfA._parameters["StoreInternalVariables"] \
1867 or selfA._toStore("CurrentState"):
1868 selfA.StoredVariables["CurrentState"].store( Xn )
1869 if selfA._toStore("ForecastState"):
1870 selfA.StoredVariables["ForecastState"].store( EMX )
1871 if selfA._toStore("BMA"):
1872 selfA.StoredVariables["BMA"].store( EMX - Xa )
1873 if selfA._toStore("InnovationAtCurrentState"):
1874 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1875 if selfA._toStore("SimulatedObservationAtCurrentState") \
1876 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1877 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1879 if selfA._parameters["StoreInternalVariables"] \
1880 or selfA._toStore("CostFunctionJ") \
1881 or selfA._toStore("CostFunctionJb") \
1882 or selfA._toStore("CostFunctionJo") \
1883 or selfA._toStore("CurrentOptimum") \
1884 or selfA._toStore("APosterioriCovariance"):
1885 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1886 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1888 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1889 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1890 selfA.StoredVariables["CostFunctionJ" ].store( J )
1892 if selfA._toStore("IndexOfOptimum") \
1893 or selfA._toStore("CurrentOptimum") \
1894 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1895 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1896 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1897 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1898 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1899 if selfA._toStore("IndexOfOptimum"):
1900 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1901 if selfA._toStore("CurrentOptimum"):
1902 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1903 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1904 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1905 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1906 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1907 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1908 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1909 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1910 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1911 if selfA._toStore("APosterioriCovariance"):
1912 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1913 if selfA._parameters["EstimationOf"] == "Parameters" \
1914 and J < previousJMinimum:
1915 previousJMinimum = J
1917 if selfA._toStore("APosterioriCovariance"):
1918 covarianceXaMin = Pn
1920 # Additional final storage of the optimum for parameter estimation
1921 # ----------------------------------------------------------------------
1922 if selfA._parameters["EstimationOf"] == "Parameters":
1923 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1924 selfA.StoredVariables["Analysis"].store( XaMin )
1925 if selfA._toStore("APosterioriCovariance"):
1926 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1927 if selfA._toStore("BMA"):
1928 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
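# Illustrative sketch (not called by mlef above): one Newton update of the
# ensemble-space weights, as performed inside the "MLEF13" loop above. It
# assumes dense arrays: EaY the observation anomalies (p x m), RI the inverse
# observation error covariance (p x p), and innovation = Ynpu - vy2 (p x 1);
# these names are taken from the loop above, the helper itself is only an
# explanatory rewriting.
def _sketchMLEFWeightUpdate( vw, EaY, RI, innovation ):
    __m = vw.size
    GradJ = numpy.ravel( vw.reshape((-1,1)) - EaY.T @ (RI @ innovation) )
    mH = numpy.identity(__m) + EaY.T @ (RI @ EaY)   # approximate Hessian in ensemble space
    return vw + numpy.linalg.solve(mH, -GradJ)      # vw + Deltaw, with Deltaw = -mH^{-1} GradJ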
1932 # ==============================================================================
1944 Computer implementation of the MMQR algorithm, based on the publication:
1945 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1946 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1949 # Retrieval of the initial data and information
1950 # --------------------------------------------------
1951 variables = numpy.ravel( x0 )
1952 mesures = numpy.ravel( y )
1953 increment = sys.float_info[0]
1956 quantile = float(quantile)
1958 # Computation of the MM parameters
1959 # ---------------------------
1960 tn = float(toler) / n
1961 e0 = -tn / math.log(tn)
1962 epsilon = (e0-tn)/(1+math.log(e0))
1964 # Initialization computations
1965 # ------------------------
1966 residus = mesures - numpy.ravel( func( variables ) )
1967 poids = 1./(epsilon+numpy.abs(residus))
1968 veps = 1. - 2. * quantile - residus * poids
1969 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1972 # Iterative search
1973 # -------------------
1974 while (increment > toler) and (iteration < maxfun) :
1977 Derivees = numpy.array(fprime(variables))
1978 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it went through YACS pipes
1979 DeriveesT = Derivees.transpose()
1980 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1981 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
1982 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1984 variables = variables + step
1985 if bounds is not None:
1986 # Caution: avoid an infinite loop if an interval is too small
1987 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1989 variables = variables - step
1990 residus = mesures - numpy.ravel( func(variables) )
1991 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1993 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1995 variables = variables - step
1996 residus = mesures - numpy.ravel( func(variables) )
1997 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1999 increment = lastsurrogate-surrogate
2000 poids = 1./(epsilon+numpy.abs(residus))
2001 veps = 1. - 2. * quantile - residus * poids
2002 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2006 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2008 return variables, Ecart, [n,p,iteration,increment,0]
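# Illustrative sketch (not called by mmqr above): the quantile "check" loss
# rho_tau(r) = r * (tau - 1_{r<0}) whose minimization defines the conditional
# quantile; the "Ecart" returned by mmqr is this quantity evaluated on the
# final residuals.
def _sketchQuantileCheckLoss( residus, quantile ):
    residus = numpy.ravel( residus )
    return float( numpy.sum( residus * (quantile - (residus < 0)) ) )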
2010 # ==============================================================================
2011 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2013 Multi-step and multi-method 3DVAR
2018 Xn = numpy.ravel(Xb).reshape((-1,1))
2020 if selfA._parameters["EstimationOf"] == "State":
2021 M = EM["Direct"].appliedTo
2023 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2024 selfA.StoredVariables["Analysis"].store( Xn )
2025 if selfA._toStore("APosterioriCovariance"):
2026 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
2028 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2029 if selfA._toStore("ForecastState"):
2030 selfA.StoredVariables["ForecastState"].store( Xn )
2032 if hasattr(Y,"stepnumber"):
2033 duration = Y.stepnumber()
2039 for step in range(duration-1):
2040 if hasattr(Y,"store"):
2041 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2043 Ynpu = numpy.ravel( Y ).reshape((-1,1))
2045 if selfA._parameters["EstimationOf"] == "State": # Forecast
2046 Xn = selfA.StoredVariables["Analysis"][-1]
2047 Xn_predicted = M( Xn )
2048 if selfA._toStore("ForecastState"):
2049 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2050 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2051 # --- > By principle, M = Id, Q = 0
2053 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2055 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
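# Usage note (illustrative): multi3dvar is a driver, not an assimilation scheme
# by itself. At each observation step it forwards the previous analysis with the
# evolution model (for "State" estimation), then delegates the analysis to the
# mono-step variant received as "oneCycle", for instance:
#     multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle=std3dvar)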
2059 # ==============================================================================
2060 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2069 Hm = HO["Direct"].appliedTo
2071 # Possible use of a precomputed H(Xb) vector
2072 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2073 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2076 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2077 if Y.size != HXb.size:
2078 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2079 if max(Y.shape) != max(HXb.shape):
2080 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2082 if selfA._toStore("JacobianMatrixAtBackground"):
2083 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2084 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2085 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2087 Ht = HO["Tangent"].asMatrix(Xb)
2089 HBHTpR = R + Ht * BHT
2090 Innovation = Y - HXb
2092 # Starting point of the optimization
2093 Xini = numpy.zeros(Xb.shape)
2095 # Definition of the cost function
2096 # ------------------------------
2097 def CostFunction(w):
2098 _W = numpy.asmatrix(numpy.ravel( w )).T
2099 if selfA._parameters["StoreInternalVariables"] or \
2100 selfA._toStore("CurrentState") or \
2101 selfA._toStore("CurrentOptimum"):
2102 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2103 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2104 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2105 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2106 if selfA._toStore("InnovationAtCurrentState"):
2107 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2109 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2110 Jo = float( - _W.T * Innovation )
2113 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2114 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2115 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2116 selfA.StoredVariables["CostFunctionJ" ].store( J )
2117 if selfA._toStore("IndexOfOptimum") or \
2118 selfA._toStore("CurrentOptimum") or \
2119 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2120 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2121 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2122 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2123 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2124 if selfA._toStore("IndexOfOptimum"):
2125 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2126 if selfA._toStore("CurrentOptimum"):
2127 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2128 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2129 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2130 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2131 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2132 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2133 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2134 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2135 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2138 def GradientOfCostFunction(w):
2139 _W = numpy.asmatrix(numpy.ravel( w )).T
2140 GradJb = HBHTpR * _W
2141 GradJo = - Innovation
2142 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2145 # Minimization of the cost functional
2146 # --------------------------------
2147 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2149 if selfA._parameters["Minimizer"] == "LBFGSB":
2150 if "0.19" <= scipy.version.version <= "1.1.0":
2151 import lbfgsbhlt as optimiseur
2153 import scipy.optimize as optimiseur
2154 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2155 func = CostFunction,
2157 fprime = GradientOfCostFunction,
2159 bounds = selfA._parameters["Bounds"],
2160 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2161 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2162 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2163 iprint = selfA._parameters["optiprint"],
2165 nfeval = Informations['funcalls']
2166 rc = Informations['warnflag']
2167 elif selfA._parameters["Minimizer"] == "TNC":
2168 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2169 func = CostFunction,
2171 fprime = GradientOfCostFunction,
2173 bounds = selfA._parameters["Bounds"],
2174 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2175 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2176 ftol = selfA._parameters["CostDecrementTolerance"],
2177 messages = selfA._parameters["optmessages"],
2179 elif selfA._parameters["Minimizer"] == "CG":
2180 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2183 fprime = GradientOfCostFunction,
2185 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2186 gtol = selfA._parameters["GradientNormTolerance"],
2187 disp = selfA._parameters["optdisp"],
2190 elif selfA._parameters["Minimizer"] == "NCG":
2191 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2194 fprime = GradientOfCostFunction,
2196 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2197 avextol = selfA._parameters["CostDecrementTolerance"],
2198 disp = selfA._parameters["optdisp"],
2201 elif selfA._parameters["Minimizer"] == "BFGS":
2202 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2205 fprime = GradientOfCostFunction,
2207 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2208 gtol = selfA._parameters["GradientNormTolerance"],
2209 disp = selfA._parameters["optdisp"],
2213 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2215 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2216 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2218 # Workaround for a TNC bug on the returned Minimum
2219 # ----------------------------------------------------------------
2220 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2221 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2222 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2224 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2226 # Retrieval of the analysis
2227 # ----------------------
2230 selfA.StoredVariables["Analysis"].store( Xa )
2232 if selfA._toStore("OMA") or \
2233 selfA._toStore("SigmaObs2") or \
2234 selfA._toStore("SimulationQuantiles") or \
2235 selfA._toStore("SimulatedObservationAtOptimum"):
2236 if selfA._toStore("SimulatedObservationAtCurrentState"):
2237 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2238 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2239 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2243 # Computation of the analysis covariance
2244 # ---------------------------------
2245 if selfA._toStore("APosterioriCovariance") or \
2246 selfA._toStore("SimulationQuantiles") or \
2247 selfA._toStore("JacobianMatrixAtOptimum") or \
2248 selfA._toStore("KalmanGainAtOptimum"):
2249 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2250 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2251 if selfA._toStore("APosterioriCovariance") or \
2252 selfA._toStore("SimulationQuantiles") or \
2253 selfA._toStore("KalmanGainAtOptimum"):
2254 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2255 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2256 if selfA._toStore("APosterioriCovariance") or \
2257 selfA._toStore("SimulationQuantiles"):
2263 _ee = numpy.matrix(numpy.zeros(nb)).T
2265 _HtEE = numpy.dot(HtM,_ee)
2266 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2267 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2268 HessienneI = numpy.matrix( HessienneI )
2270 if min(A.shape) != max(A.shape):
2271 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2272 if (numpy.diag(A) < 0).any():
2273 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2274 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2276 L = numpy.linalg.cholesky( A )
2278 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2279 if selfA._toStore("APosterioriCovariance"):
2280 selfA.StoredVariables["APosterioriCovariance"].store( A )
2281 if selfA._toStore("JacobianMatrixAtOptimum"):
2282 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2283 if selfA._toStore("KalmanGainAtOptimum"):
2284 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2285 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2286 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2288 # Additional computations and/or storage
2289 # ---------------------------------------
2290 if selfA._toStore("Innovation") or \
2291 selfA._toStore("SigmaObs2") or \
2292 selfA._toStore("MahalanobisConsistency") or \
2293 selfA._toStore("OMB"):
2295 if selfA._toStore("Innovation"):
2296 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2297 if selfA._toStore("BMA"):
2298 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2299 if selfA._toStore("OMA"):
2300 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2301 if selfA._toStore("OMB"):
2302 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2303 if selfA._toStore("SigmaObs2"):
2304 TraceR = R.trace(Y.size)
2305 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2306 if selfA._toStore("MahalanobisConsistency"):
2307 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2308 if selfA._toStore("SimulationQuantiles"):
2309 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2310 if selfA._toStore("SimulatedObservationAtBackground"):
2311 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2312 if selfA._toStore("SimulatedObservationAtOptimum"):
2313 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
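# Illustrative sketch (not called by psas3dvar above): the dual, observation-
# space cost function minimized by PSAS, J(w) = 0.5 w^T (H B H^T + R) w - w^T d,
# together with its gradient. HBHTpR and the innovation d are assumed here to
# be dense arrays, mirroring the variables of CostFunction and
# GradientOfCostFunction above; the analysis is then recovered as Xa = Xb + B H^T w.
def _sketchPSASDualCostAndGradient( w, HBHTpR, d ):
    w = numpy.ravel( w ).reshape((-1,1))
    d = numpy.ravel( d ).reshape((-1,1))
    J = float( 0.5 * w.T @ HBHTpR @ w - w.T @ d )
    GradJ = numpy.ravel( HBHTpR @ w - d )
    return J, GradJ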
2317 # ==============================================================================
2318 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2322 if selfA._parameters["EstimationOf"] == "Parameters":
2323 selfA._parameters["StoreInternalVariables"] = True
2326 H = HO["Direct"].appliedControledFormTo
2328 if selfA._parameters["EstimationOf"] == "State":
2329 M = EM["Direct"].appliedControledFormTo
2331 if CM is not None and "Tangent" in CM and U is not None:
2332 Cm = CM["Tangent"].asMatrix(Xb)
2336 # Observation duration and sizes
2337 if hasattr(Y,"stepnumber"):
2338 duration = Y.stepnumber()
2339 __p = numpy.cumprod(Y.shape())[-1]
2342 __p = numpy.array(Y).size
2344 # Precomputation of the inverses of B and R
2345 if selfA._parameters["StoreInternalVariables"] \
2346 or selfA._toStore("CostFunctionJ") \
2347 or selfA._toStore("CostFunctionJb") \
2348 or selfA._toStore("CostFunctionJo") \
2349 or selfA._toStore("CurrentOptimum") \
2350 or selfA._toStore("APosterioriCovariance"):
2355 __m = selfA._parameters["NumberOfMembers"]
2357 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2359 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2361 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2363 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2364 selfA.StoredVariables["Analysis"].store( Xb )
2365 if selfA._toStore("APosterioriCovariance"):
2366 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2369 previousJMinimum = numpy.finfo(float).max
2371 for step in range(duration-1):
2372 if hasattr(Y,"store"):
2373 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2375 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2378 if hasattr(U,"store") and len(U)>1:
2379 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2380 elif hasattr(U,"store") and len(U)==1:
2381 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2383 Un = numpy.asmatrix(numpy.ravel( U )).T
2387 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2388 Xn = CovarianceInflation( Xn,
2389 selfA._parameters["InflationType"],
2390 selfA._parameters["InflationFactor"],
2393 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2394 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2396 returnSerieAsArrayMatrix = True )
2397 Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2398 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2400 returnSerieAsArrayMatrix = True )
2401 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
2402 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2403 Xn_predicted = Xn_predicted + Cm * Un
2404 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2405 # --- > By principle, M = Id, Q = 0
2407 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2409 returnSerieAsArrayMatrix = True )
2411 # Mean of forecast and observation of forecast
2412 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2413 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2415 #--------------------------
2416 if VariantM == "KalmanFilterFormula05":
2417 PfHT, HPfHT = 0., 0.
2418 for i in range(__m):
2419 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2420 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2421 PfHT += Exfi * Eyfi.T
2422 HPfHT += Eyfi * Eyfi.T
2423 PfHT = (1./(__m-1)) * PfHT
2424 HPfHT = (1./(__m-1)) * HPfHT
2425 Kn = PfHT * ( R + HPfHT ).I
2428 for i in range(__m):
2429 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2430 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2431 #--------------------------
2432 elif VariantM == "KalmanFilterFormula16":
2433 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2434 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2436 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2437 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2439 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2441 for i in range(__m):
2442 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2443 #--------------------------
2445 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2447 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2448 Xn = CovarianceInflation( Xn,
2449 selfA._parameters["InflationType"],
2450 selfA._parameters["InflationFactor"],
2453 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2454 #--------------------------
2456 if selfA._parameters["StoreInternalVariables"] \
2457 or selfA._toStore("CostFunctionJ") \
2458 or selfA._toStore("CostFunctionJb") \
2459 or selfA._toStore("CostFunctionJo") \
2460 or selfA._toStore("APosterioriCovariance") \
2461 or selfA._toStore("InnovationAtCurrentAnalysis") \
2462 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2463 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2464 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2465 _Innovation = Ynpu - _HXa
2467 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2468 # ---> with the analysis
2469 selfA.StoredVariables["Analysis"].store( Xa )
2470 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2471 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2472 if selfA._toStore("InnovationAtCurrentAnalysis"):
2473 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2474 # ---> with the current state
2475 if selfA._parameters["StoreInternalVariables"] \
2476 or selfA._toStore("CurrentState"):
2477 selfA.StoredVariables["CurrentState"].store( Xn )
2478 if selfA._toStore("ForecastState"):
2479 selfA.StoredVariables["ForecastState"].store( EMX )
2480 if selfA._toStore("BMA"):
2481 selfA.StoredVariables["BMA"].store( EMX - Xa )
2482 if selfA._toStore("InnovationAtCurrentState"):
2483 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2484 if selfA._toStore("SimulatedObservationAtCurrentState") \
2485 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2486 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2488 if selfA._parameters["StoreInternalVariables"] \
2489 or selfA._toStore("CostFunctionJ") \
2490 or selfA._toStore("CostFunctionJb") \
2491 or selfA._toStore("CostFunctionJo") \
2492 or selfA._toStore("CurrentOptimum") \
2493 or selfA._toStore("APosterioriCovariance"):
2494 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2495 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2497 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2498 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2499 selfA.StoredVariables["CostFunctionJ" ].store( J )
2501 if selfA._toStore("IndexOfOptimum") \
2502 or selfA._toStore("CurrentOptimum") \
2503 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2504 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2505 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2506 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2507 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2508 if selfA._toStore("IndexOfOptimum"):
2509 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2510 if selfA._toStore("CurrentOptimum"):
2511 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2512 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2513 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2514 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2515 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2516 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2517 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2518 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2519 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2520 if selfA._toStore("APosterioriCovariance"):
2521 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2522 if selfA._parameters["EstimationOf"] == "Parameters" \
2523 and J < previousJMinimum:
2524 previousJMinimum = J
2526 if selfA._toStore("APosterioriCovariance"):
2527 covarianceXaMin = Pn
2529 # Additional final storage of the optimum for parameter estimation
2530 # ----------------------------------------------------------------------
2531 if selfA._parameters["EstimationOf"] == "Parameters":
2532 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2533 selfA.StoredVariables["Analysis"].store( XaMin )
2534 if selfA._toStore("APosterioriCovariance"):
2535 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2536 if selfA._toStore("BMA"):
2537 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
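# Illustrative sketch (not called by senkf above): the ensemble Kalman gain of
# the "KalmanFilterFormula05" branch, K = Pf H^T (H Pf H^T + R)^{-1}, estimated
# from the forecast members Xf (n x m), their observations HXf (p x m) and a
# dense observation error covariance R (p x p).
def _sketchStochasticEnKFGain( Xf, HXf, R ):
    __m = Xf.shape[1]
    ExF = Xf - Xf.mean(axis=1, keepdims=True)        # state anomalies
    EyF = HXf - HXf.mean(axis=1, keepdims=True)      # observation anomalies
    PfHT  = ExF @ EyF.T / (__m - 1)
    HPfHT = EyF @ EyF.T / (__m - 1)
    return PfHT @ numpy.linalg.inv( R + HPfHT )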
2541 # ==============================================================================
2542 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2551 Hm = HO["Direct"].appliedTo
2552 Ha = HO["Adjoint"].appliedInXTo
2554 # Possible use of a precomputed H(Xb) vector
2555 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2556 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2559 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2560 if Y.size != HXb.size:
2561 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2562 if max(Y.shape) != max(HXb.shape):
2563 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2565 if selfA._toStore("JacobianMatrixAtBackground"):
2566 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2567 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2568 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2570 # Precomputation of the inverses of B and R
2574 # Starting point of the optimization
2575 Xini = selfA._parameters["InitializationPoint"]
2577 # Definition of the cost function
2578 # ------------------------------
2579 def CostFunction(x):
2580 _X = numpy.asmatrix(numpy.ravel( x )).T
2581 if selfA._parameters["StoreInternalVariables"] or \
2582 selfA._toStore("CurrentState") or \
2583 selfA._toStore("CurrentOptimum"):
2584 selfA.StoredVariables["CurrentState"].store( _X )
2586 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2587 _Innovation = Y - _HX
2588 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2589 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2590 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2591 if selfA._toStore("InnovationAtCurrentState"):
2592 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2594 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2595 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2598 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2599 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2600 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2601 selfA.StoredVariables["CostFunctionJ" ].store( J )
2602 if selfA._toStore("IndexOfOptimum") or \
2603 selfA._toStore("CurrentOptimum") or \
2604 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2605 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2606 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2607 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2608 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2609 if selfA._toStore("IndexOfOptimum"):
2610 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2611 if selfA._toStore("CurrentOptimum"):
2612 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2613 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2614 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2615 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2616 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2617 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2618 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2619 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2620 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2623 def GradientOfCostFunction(x):
2624 _X = numpy.asmatrix(numpy.ravel( x )).T
2626 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2627 GradJb = BI * (_X - Xb)
2628 GradJo = - Ha( (_X, RI * (Y - _HX)) )
2629 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2632 # Minimization of the cost functional
2633 # --------------------------------
2634 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2636 if selfA._parameters["Minimizer"] == "LBFGSB":
2637 if "0.19" <= scipy.version.version <= "1.1.0":
2638 import lbfgsbhlt as optimiseur
2640 import scipy.optimize as optimiseur
2641 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2642 func = CostFunction,
2644 fprime = GradientOfCostFunction,
2646 bounds = selfA._parameters["Bounds"],
2647 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2648 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2649 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2650 iprint = selfA._parameters["optiprint"],
2652 nfeval = Informations['funcalls']
2653 rc = Informations['warnflag']
2654 elif selfA._parameters["Minimizer"] == "TNC":
2655 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2656 func = CostFunction,
2658 fprime = GradientOfCostFunction,
2660 bounds = selfA._parameters["Bounds"],
2661 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2662 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2663 ftol = selfA._parameters["CostDecrementTolerance"],
2664 messages = selfA._parameters["optmessages"],
2666 elif selfA._parameters["Minimizer"] == "CG":
2667 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2670 fprime = GradientOfCostFunction,
2672 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2673 gtol = selfA._parameters["GradientNormTolerance"],
2674 disp = selfA._parameters["optdisp"],
2677 elif selfA._parameters["Minimizer"] == "NCG":
2678 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2681 fprime = GradientOfCostFunction,
2683 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2684 avextol = selfA._parameters["CostDecrementTolerance"],
2685 disp = selfA._parameters["optdisp"],
2688 elif selfA._parameters["Minimizer"] == "BFGS":
2689 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2692 fprime = GradientOfCostFunction,
2694 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2695 gtol = selfA._parameters["GradientNormTolerance"],
2696 disp = selfA._parameters["optdisp"],
2700 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2702 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2703 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2705 # Workaround for a TNC bug on the returned Minimum
2706 # ----------------------------------------------------------------
2707 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2708 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2710 # Retrieval of the analysis
2711 # ----------------------
2712 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2714 selfA.StoredVariables["Analysis"].store( Xa )
2716 if selfA._toStore("OMA") or \
2717 selfA._toStore("SigmaObs2") or \
2718 selfA._toStore("SimulationQuantiles") or \
2719 selfA._toStore("SimulatedObservationAtOptimum"):
2720 if selfA._toStore("SimulatedObservationAtCurrentState"):
2721 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2722 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2723 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2727 # Computation of the analysis covariance
2728 # ---------------------------------
2729 if selfA._toStore("APosterioriCovariance") or \
2730 selfA._toStore("SimulationQuantiles") or \
2731 selfA._toStore("JacobianMatrixAtOptimum") or \
2732 selfA._toStore("KalmanGainAtOptimum"):
2733 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2734 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2735 if selfA._toStore("APosterioriCovariance") or \
2736 selfA._toStore("SimulationQuantiles") or \
2737 selfA._toStore("KalmanGainAtOptimum"):
2738 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2739 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2740 if selfA._toStore("APosterioriCovariance") or \
2741 selfA._toStore("SimulationQuantiles"):
2745 _ee = numpy.matrix(numpy.zeros(nb)).T
2747 _HtEE = numpy.dot(HtM,_ee)
2748 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2749 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2750 HessienneI = numpy.matrix( HessienneI )
2752 if min(A.shape) != max(A.shape):
2753 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2754 if (numpy.diag(A) < 0).any():
2755 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2756 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2758 L = numpy.linalg.cholesky( A )
2760 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2761 if selfA._toStore("APosterioriCovariance"):
2762 selfA.StoredVariables["APosterioriCovariance"].store( A )
2763 if selfA._toStore("JacobianMatrixAtOptimum"):
2764 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2765 if selfA._toStore("KalmanGainAtOptimum"):
2766 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2767 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2768 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2770 # Additional computations and/or storage
2771 # ---------------------------------------
2772 if selfA._toStore("Innovation") or \
2773 selfA._toStore("SigmaObs2") or \
2774 selfA._toStore("MahalanobisConsistency") or \
2775 selfA._toStore("OMB"):
2777 if selfA._toStore("Innovation"):
2778 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2779 if selfA._toStore("BMA"):
2780 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2781 if selfA._toStore("OMA"):
2782 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2783 if selfA._toStore("OMB"):
2784 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2785 if selfA._toStore("SigmaObs2"):
2786 TraceR = R.trace(Y.size)
2787 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2788 if selfA._toStore("MahalanobisConsistency"):
2789 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2790 if selfA._toStore("SimulationQuantiles"):
2791 QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2792 if selfA._toStore("SimulatedObservationAtBackground"):
2793 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2794 if selfA._toStore("SimulatedObservationAtOptimum"):
2795 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
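# Illustrative sketch (not called by std3dvar above): the a posteriori analysis
# covariance assembled column by column through HessienneI above is, in dense
# matrix form, A = (B^{-1} + H^T R^{-1} H)^{-1}, with HtM the tangent of the
# observation operator at the optimum. BI, RI and HtM are assumed here to be
# dense numpy arrays.
def _sketchAPosterioriCovariance( BI, RI, HtM ):
    return numpy.linalg.inv( BI + HtM.T @ (RI @ HtM) )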
2799 # ==============================================================================
2800 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2809 Hm = HO["Direct"].appliedControledFormTo
2810 Mm = EM["Direct"].appliedControledFormTo
2812 if CM is not None and "Tangent" in CM and U is not None:
2813 Cm = CM["Tangent"].asMatrix(Xb)
2819 if hasattr(U,"store") and 1<=_step<len(U) :
2820 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2821 elif hasattr(U,"store") and len(U)==1:
2822 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2824 _Un = numpy.asmatrix(numpy.ravel( U )).T
2829 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
2830 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2836 # Remark: the observations are used starting from time step number 1,
2837 # and are arranged in Yo according to these indices.
2838 # Step 0 is therefore not used, since the first stage begins
2839 # with the observation of step 1.
2841 # Number of steps identical to the number of observation steps
2842 if hasattr(Y,"stepnumber"):
2843 duration = Y.stepnumber()
2847 # Precomputation of the inverses of B and R
2851 # Starting point of the optimization
2852 Xini = selfA._parameters["InitializationPoint"]
2854 # Definition of the cost function
2855 # ------------------------------
2856 selfA.DirectCalculation = [None,] # Step 0 is not observed
2857 selfA.DirectInnovation = [None,] # Step 0 is not observed
2858 def CostFunction(x):
2859 _X = numpy.asmatrix(numpy.ravel( x )).T
2860 if selfA._parameters["StoreInternalVariables"] or \
2861 selfA._toStore("CurrentState") or \
2862 selfA._toStore("CurrentOptimum"):
2863 selfA.StoredVariables["CurrentState"].store( _X )
2864 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2865 selfA.DirectCalculation = [None,]
2866 selfA.DirectInnovation = [None,]
2869 for step in range(0,duration-1):
2870 if hasattr(Y,"store"):
2871 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2873 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2877 if selfA._parameters["EstimationOf"] == "State":
2878 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2879 elif selfA._parameters["EstimationOf"] == "Parameters":
2882 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2883 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2884 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2886 # Difference with the observations
2887 if selfA._parameters["EstimationOf"] == "State":
2888 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2889 elif selfA._parameters["EstimationOf"] == "Parameters":
2890 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2892 # Storage of the state
2893 selfA.DirectCalculation.append( _Xn )
2894 selfA.DirectInnovation.append( _YmHMX )
2896 # Addition to the observation cost term
2897 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2900 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2901 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2902 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2903 selfA.StoredVariables["CostFunctionJ" ].store( J )
2904 if selfA._toStore("IndexOfOptimum") or \
2905 selfA._toStore("CurrentOptimum") or \
2906 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2907 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2908 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2909 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2910 if selfA._toStore("IndexOfOptimum"):
2911 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2912 if selfA._toStore("CurrentOptimum"):
2913 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2914 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2915 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2916 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2917 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2918 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2919 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2922 def GradientOfCostFunction(x):
2923 _X = numpy.asmatrix(numpy.ravel( x )).T
2924 GradJb = BI * (_X - Xb)
2926 for step in range(duration-1,0,-1):
2927 # Retrieve the last stored evolution state
2928 _Xn = selfA.DirectCalculation.pop()
2929 # Retrieve the last stored innovation
2930 _YmHMX = selfA.DirectInnovation.pop()
2931 # Computation of the adjoint operators
2932 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2933 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2934 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2935 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2936 # Gradient computation via the adjoint state
2937 GradJo = GradJo + Ha * RI * _YmHMX # Equivalent, for a linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
2938 GradJo = Ma * GradJo # Equivalent, for a linear Ma, to: Ma( (_Xn, GradJo) )
2939 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
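    #
    # The minimizer is selected by name among the legacy scipy.optimize interfaces.
    # For scipy versions between 0.19 and 1.1.0, a locally provided module
    # "lbfgsbhlt" (presumably a patched copy of the L-BFGS-B driver) is imported
    # in place of scipy.optimize to work around known issues of those releases.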
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations: operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precomputation of the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    BT = B.getT()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
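    # In this variant the control variable is the increment v, with the state
    # reconstructed as x = Xb + B * v. The background term then reads
    # Jb(v) = 0.5 * v' * B' * v, so only B (through BT) is needed in the cost
    # function itself, while B^{-1} is only required by the optional a posteriori
    # diagnostics further below. Starting from Xini = 0 means starting from Xb.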
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb = float( 0.5 * _V.T * BT * _V )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J  = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
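    # GradJb is the derivative of Jb(v) = 0.5 * v' * B' * v with respect to v, and
    # GradJo applies the adjoint observation operator Ha to the R^{-1}-weighted
    # innovation evaluated at the reconstructed state _X = Xb + B * v, with the same
    # sign convention as the other 3DVAR variants of this module.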
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis error covariance
    # --------------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
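        # Each vector appended above is (B^{-1} + H' R^{-1} H) applied to the basis
        # vector e_i, so HessienneI assembles the Hessian of the cost function in the
        # state variable; its inverse below is the classical a posteriori covariance
        # A = (B^{-1} + H' R^{-1} H)^{-1}.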
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
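            # numpy.linalg.cholesky raises LinAlgError when the matrix is not positive
            # definite, so a successful factorization is used here as an inexpensive
            # numerical check of the a posteriori covariance.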
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
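    # The two expressions of the gain are algebraically equivalent: the first
    # ("covariance" form, B H' (R + H B H')^{-1}) is cheaper when there are fewer
    # observations than state variables, and the second ("information" form,
    # (B^{-1} + H' R^{-1} H)^{-1} H' R^{-1}) is cheaper in the opposite case.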
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
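    # SigmaObs2 is a Desroziers-type estimate d' * (y - H(xa)) / Tr(R) of the scaling
    # of the observation error variances, and MahalanobisConsistency stores 2*J(xa)/p,
    # which is expected to be close to 1 when the error statistics are consistent
    # (p being the number of observations).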
    if selfA._toStore("SimulationQuantiles"):
        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')