# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines the generic numerical objects.
    """
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire  # State to evaluate, and serialized user function description
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX = __fonction( __X )
    return numpy.ravel( __HX )
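# Illustrative sketch (editor's note, not part of the original module): this is
# how the multiprocessing workers below receive their work items. The path,
# module and function names are hypothetical placeholders, not real files.
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/dir",    # hypothetical directory
#       "__userFunction__modl" : "mymodel",           # hypothetical module name
#       "__userFunction__name" : "myfunction",        # hypothetical function name
#       }
#   HX = ExecuteFunction( (X, funcrepr) )             # one evaluation of H(X)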
# ==============================================================================
class FDApproximation(object):
    """
    This class provides an interface to define approximated operators. When
    creating an object and supplying a function "Function", one obtains an
    object with the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The finite-difference (FD) approximation is controlled
    by the multiplicative increment "increment" (1% by default), or by the
    fixed increment "dX" which is then multiplied by "increment" (hence in %),
    and centered FD are used if the boolean "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1, # (sic) historical spelling kept for API compatibility
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, duplicate %i is reused"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Direct calculation using the supplied function.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Calculation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X. X is a numpy.matrix.

        Centered finite differences (second-order approximation), as
        summarized in the sketch after this docstring:
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are calculated
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by (2*dXi)
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i], to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is calculated
        2/ The central value HX = H(X) is calculated
        3/ The differences (HX_plus_dXi-HX) are formed and divided by
           (dXi)
        4/ Each result, component by component, becomes a column of the Jacobian
        """
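        # Editor's note, a reminder sketch (not part of the original code),
        # assuming a generic operator H and a scalar perturbation dXi:
        #
        #   centered (order 2):     J[:,i] = ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        #   non-centered (order 1): J[:,i] = ( H(X + dXi*e_i) - H(X) ) / dXi
        #
        # where e_i is the i-th canonical basis vector. For a state of size n,
        # the centered variant costs 2n evaluations of H, the non-centered one
        # n+1 evaluations.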
        logging.debug("FDA Start of Jacobian calculation")
        logging.debug("FDA   Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA   Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated  = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, duplicate %i is reused"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Jacobian calculation (by reusing duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of Jacobian calculation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Tangent calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Adjoint calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # --------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the adjoint at X applied to Y
            # ---------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
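# Illustrative usage sketch (editor's note, not part of the original module):
# finite-difference operators around a toy quadratic function. "toyOperator"
# is a hypothetical example, not an ADAO API.
#
#   def toyOperator( x ):
#       x = numpy.ravel( x )
#       return numpy.array([ x[0]**2, x[0]*x[1] ])
#
#   FDA = FDApproximation( Function = toyOperator, increment = 0.01, centeredDF = True )
#   HX  = FDA.DirectOperator( [1., 2.] )                 # H(X)
#   J   = FDA.TangentOperator( ([1., 2.], None) )        # full Jacobian at X
#   JtY = FDA.AdjointOperator( ([1., 2.], [1., 0.]) )    # J(X)^T applied to Y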
# ==============================================================================
def mmqr(func=None, x0=None, fprime=None, bounds=None, quantile=0.5, maxfun=15000, toler=1.e-06, y=None):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    # Recovery of initial data and information
    # ----------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Calculation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization calculations
    # ---------------------------
    residus  = mesures - numpy.ravel( func( variables ) )
    poids    = 1./(epsilon+numpy.abs(residus))
    veps     = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Necessary to reshape the matrix if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Warning: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
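# Illustrative usage sketch (editor's note, not part of the original module):
# median regression (quantile = 0.5) of a linear model y = a*t + b on toy
# data. The model, Jacobian and data below are hypothetical examples.
#
#   t = numpy.arange(10.)
#   def modele( coefs ):             # predicted values for coefficients (a, b)
#       a, b = numpy.ravel( coefs )
#       return a*t + b
#   def jacobien( coefs ):           # d(model)/d(coefs), of shape (n, p)
#       return numpy.vstack((t, numpy.ones_like(t))).T
#   yobs = 2.*t + 1.
#   coefs, ecart, infos = mmqr( func=modele, x0=[1., 0.], fprime=jacobien,
#                               quantile=0.5, y=yobs )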
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of size _nbmembers-1 of centered random states"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, according to
        the handwritten notes of MB and consistent with the code of PS with
        eps = -1
        """
        eps = -1
        Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfAnomalies( _ensemble, _optmean = None):
    "Returns the centered anomalies from an ensemble of size StateSize*NbOfMembers"
    if _optmean is None:
        Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
    else:
        Em = numpy.ravel(_optmean)[:,numpy.newaxis]
    #
    return numpy.asarray(_ensemble) - Em
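# Illustrative usage sketch (editor's note, not part of the original module):
# building a small background ensemble and its centered anomalies. The values
# are hypothetical toy data.
#
#   Xb = numpy.array([1., 2., 3.])                        # background state
#   B  = 0.1 * numpy.eye(3)                               # background covariance
#   E  = EnsembleOfBackgroundPerturbations( Xb, B, 10 )   # 3x10 ensemble
#   An = EnsembleOfAnomalies( E )                         # anomalies, zero column mean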
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Summary: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
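# Illustrative usage sketch (editor's note, not part of the original module):
# multiplicative inflation of ensemble anomalies by 5%, which leaves the
# ensemble mean unchanged. "EXa" is a hypothetical StateSize*NbOfMembers array.
#
#   EXa_inflated = CovarianceInflation( EXa,
#       InflationType   = "MultiplicativeOnAnalysisAnomalies",
#       InflationFactor = 1.05 )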
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR (Bouttier 1999, Courtier 1993)

    selfA is identical to the "self" of the calling algorithm and holds its
    internal variables.
    """
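    # Editor's note, a reminder sketch (not part of the original code): the
    # standard 3DVAR minimizes, over the state x, the cost function
    #
    #   J(x) = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-H(x))^T R^{-1} (Y-H(x))
    #
    # with background Xb, observations Y, observation operator H, and
    # covariances B and R; CostFunction/GradientOfCostFunction below implement
    # J and its gradient.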
    #
    # Correction to work around a TNC bug on the return of the Minimum
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Operators
    # ---------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Possible use of a precalculated H(Xb) vector
    # --------------------------------------------
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precalculation of the inversions of B and R
    # -------------------------------------------
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    # ----------------------------------
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(x):
        _X  = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X      = numpy.asmatrix(numpy.ravel( x )).T
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BI * (_X - Xb)
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the return of the Minimum
    # ----------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa.A1 )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Supplementary calculations and/or storages
    # ------------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B (Huang 2000)

    selfA is identical to the "self" of the calling algorithm and holds its
    internal variables.
    """
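    # Editor's note, a reminder sketch (not part of the original code): this
    # variant minimizes over the auxiliary variable v, with x = Xb + B v,
    #
    #   J(v) = 1/2 v^T B v + 1/2 (Y-H(Xb+Bv))^T R^{-1} (Y-H(Xb+Bv))
    #
    # so that the background term requires products by B only, not by B^{-1}.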
    #
    # Correction to work around a TNC bug on the return of the Minimum
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Operators
    # ---------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precalculation of the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    BT = B.getT()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BT * _V
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the return of the Minimum
    # ----------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Supplementary calculations and/or storages
    # ------------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR (Courtier 1994, 1997)

    selfA is identical to the "self" of the calling algorithm and holds its
    internal variables.
    """
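    # Editor's note, a reminder sketch (not part of the original code): the
    # incremental formulation solves a sequence of quadratic problems. At each
    # outer iteration, one minimizes over the increment dx
    #
    #   J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d-H'dx)^T R^{-1} (d-H'dx)
    #
    # where d = Y - H(Xb) is the innovation and H' the tangent linear of H at
    # the current outer-loop point.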
    #
    # Correction to work around a TNC bug on the return of the Minimum
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precalculation of the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Tangent operator at the current outer-loop point Xr
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
        def CostFunction(dx):
            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht * _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        #
        def GradientOfCostFunction(dx):
            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX         = Ht * _dX
            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb       = BI * _dX
            GradJo       = - Ht.T @ (RI * _dInnovation)
            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the cost function
        # ---------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1450 # Calculs et/ou stockages supplémentaires
1451 # ---------------------------------------
1452 if selfA._toStore("Innovation") or \
1453 selfA._toStore("SigmaObs2") or \
1454 selfA._toStore("MahalanobisConsistency") or \
1455 selfA._toStore("OMB"):
1457 if selfA._toStore("Innovation"):
1458 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1459 if selfA._toStore("BMA"):
1460 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1461 if selfA._toStore("OMA"):
1462 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1463 if selfA._toStore("OMB"):
1464 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1465 if selfA._toStore("SigmaObs2"):
1466 TraceR = R.trace(Y.size)
1467 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1468 if selfA._toStore("MahalanobisConsistency"):
1469 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1470 if selfA._toStore("SimulationQuantiles"):
nech = selfA._parameters["NumberOfSamplesForQuantiles"]
HXa = numpy.matrix(numpy.ravel( HXa )).T
YfQ = None
for i in range(nech):
if selfA._parameters["SimulationForQuantiles"] == "Linear":
dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
Yr = HXa + dYr
elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
if YfQ is None:
YfQ = Yr
else:
YfQ = numpy.hstack((YfQ,Yr))
YfQ.sort(axis=-1)
YQ = None
for quantile in selfA._parameters["Quantiles"]:
if not (0. <= float(quantile) <= 1.): continue
indice = int(nech * float(quantile) - 1./nech)
if YQ is None: YQ = YfQ[:,indice]
else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
selfA.StoredVariables["SimulationQuantiles"].store( YQ )
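# The simulated quantiles are obtained by Monte-Carlo sampling of the
# Gaussian analysis law N(Xa, A): each draw is propagated through the
# tangent operator ("Linear") or the full operator ("NonLinear"), and the
# requested quantiles are read off the sorted simulated observations.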
1494 if selfA._toStore("SimulatedObservationAtBackground"):
1495 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1496 if selfA._toStore("SimulatedObservationAtOptimum"):
1497 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
return 0
1501 # ==============================================================================
1502 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1504 3DVAR PSAS (Huang 2000)
selfA is identical to the "self" of the calling algorithm and carries its control information.
# Workaround for a TNC bug on the returned Minimum
1511 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1512 selfA.setParameterValue("StoreInternalVariables",True)
1518 Hm = HO["Direct"].appliedTo
# Optional use of a precomputed H(Xb) vector
1521 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1522 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
HXb = Hm( Xb )
1525 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1526 if Y.size != HXb.size:
1527 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1528 if max(Y.shape) != max(HXb.shape):
1529 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1531 if selfA._toStore("JacobianMatrixAtBackground"):
1532 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1533 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1534 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1536 Ht = HO["Tangent"].asMatrix(Xb)
BHT = B * Ht.T
1538 HBHTpR = R + Ht * BHT
1539 Innovation = Y - HXb
# Starting point of the optimization
1542 Xini = numpy.zeros(Xb.shape)
# Definition of the cost function
# -------------------------------
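# PSAS minimizes the dual cost function in observation space,
# J(w) = 1/2 w^T (H B H^T + R) w - w^T d with d = Y - H(Xb),
# and the optimal weights w are then mapped back to the state space
# through Xa = Xb + B H^T w.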
1546 def CostFunction(w):
1547 _W = numpy.asmatrix(numpy.ravel( w )).T
1548 if selfA._parameters["StoreInternalVariables"] or \
1549 selfA._toStore("CurrentState") or \
1550 selfA._toStore("CurrentOptimum"):
1551 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1552 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1553 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1554 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1555 if selfA._toStore("InnovationAtCurrentState"):
1556 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1558 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1559 Jo = float( - _W.T * Innovation )
J = Jb + Jo
1562 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1563 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1564 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1565 selfA.StoredVariables["CostFunctionJ" ].store( J )
1566 if selfA._toStore("IndexOfOptimum") or \
1567 selfA._toStore("CurrentOptimum") or \
1568 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1569 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1570 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1571 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1572 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1573 if selfA._toStore("IndexOfOptimum"):
1574 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1575 if selfA._toStore("CurrentOptimum"):
1576 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1577 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1578 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1579 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1580 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1581 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1582 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1583 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1584 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
return J
1587 def GradientOfCostFunction(w):
1588 _W = numpy.asmatrix(numpy.ravel( w )).T
1589 GradJb = HBHTpR * _W
1590 GradJo = - Innovation
1591 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
return GradJ
# Minimization of the functional
# ------------------------------
1596 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1598 if selfA._parameters["Minimizer"] == "LBFGSB":
1599 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1600 if "0.19" <= scipy.version.version <= "1.1.0":
1601 import lbfgsbhlt as optimiseur
1603 import scipy.optimize as optimiseur
1604 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1605 func = CostFunction,
1607 fprime = GradientOfCostFunction,
1609 bounds = selfA._parameters["Bounds"],
1610 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1611 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1612 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1613 iprint = selfA._parameters["optiprint"],
1615 nfeval = Informations['funcalls']
1616 rc = Informations['warnflag']
1617 elif selfA._parameters["Minimizer"] == "TNC":
Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
func = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
bounds = selfA._parameters["Bounds"],
maxfun = selfA._parameters["MaximumNumberOfSteps"],
pgtol = selfA._parameters["ProjectedGradientTolerance"],
ftol = selfA._parameters["CostDecrementTolerance"],
messages = selfA._parameters["optmessages"],
)
1629 elif selfA._parameters["Minimizer"] == "CG":
Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
gtol = selfA._parameters["GradientNormTolerance"],
disp = selfA._parameters["optdisp"],
full_output = True,
)
1640 elif selfA._parameters["Minimizer"] == "NCG":
Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
avextol = selfA._parameters["CostDecrementTolerance"],
disp = selfA._parameters["optdisp"],
full_output = True,
)
1651 elif selfA._parameters["Minimizer"] == "BFGS":
Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
f = CostFunction,
x0 = Xini,
fprime = GradientOfCostFunction,
args = (),
maxiter = selfA._parameters["MaximumNumberOfSteps"],
gtol = selfA._parameters["GradientNormTolerance"],
disp = selfA._parameters["optdisp"],
full_output = True,
)
else:
raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1665 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1666 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
# Workaround for a TNC bug on the returned Minimum
# ------------------------------------------------
1670 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1671 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1672 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
else:
1674 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
# Obtaining the analysis
# ----------------------
Xa = numpy.ravel( Minimum )
1680 selfA.StoredVariables["Analysis"].store( Xa )
1682 if selfA._toStore("OMA") or \
1683 selfA._toStore("SigmaObs2") or \
1684 selfA._toStore("SimulationQuantiles") or \
1685 selfA._toStore("SimulatedObservationAtOptimum"):
1686 if selfA._toStore("SimulatedObservationAtCurrentState"):
1687 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1688 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1689 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
else:
HXa = Hm( Xa )
# Computation of the analysis covariance
# --------------------------------------
1695 if selfA._toStore("APosterioriCovariance") or \
1696 selfA._toStore("SimulationQuantiles") or \
1697 selfA._toStore("JacobianMatrixAtOptimum") or \
1698 selfA._toStore("KalmanGainAtOptimum"):
1699 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1700 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1701 if selfA._toStore("APosterioriCovariance") or \
1702 selfA._toStore("SimulationQuantiles") or \
1703 selfA._toStore("KalmanGainAtOptimum"):
1704 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1705 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1706 if selfA._toStore("APosterioriCovariance") or \
1707 selfA._toStore("SimulationQuantiles"):
BI = B.getI()
RI = R.getI()
HessienneI = []
nb = Xa.size
for i in range(nb):
_ee = numpy.matrix(numpy.zeros(nb)).T
_ee[i] = 1.
_HtEE = numpy.dot(HtM,_ee)
_HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
HessienneI = numpy.matrix( HessienneI )
A = HessienneI.I
1720 if min(A.shape) != max(A.shape):
raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1722 if (numpy.diag(A) < 0).any():
1723 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
try:
L = numpy.linalg.cholesky( A )
except numpy.linalg.LinAlgError:
raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1729 if selfA._toStore("APosterioriCovariance"):
1730 selfA.StoredVariables["APosterioriCovariance"].store( A )
1731 if selfA._toStore("JacobianMatrixAtOptimum"):
1732 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1733 if selfA._toStore("KalmanGainAtOptimum"):
1734 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1735 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1736 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
# Additional calculations and/or storage
# --------------------------------------
1740 if selfA._toStore("Innovation") or \
1741 selfA._toStore("SigmaObs2") or \
1742 selfA._toStore("MahalanobisConsistency") or \
1743 selfA._toStore("OMB"):
d = Y - HXb
1745 if selfA._toStore("Innovation"):
1746 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1747 if selfA._toStore("BMA"):
1748 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1749 if selfA._toStore("OMA"):
1750 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1751 if selfA._toStore("OMB"):
1752 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1753 if selfA._toStore("SigmaObs2"):
1754 TraceR = R.trace(Y.size)
1755 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1756 if selfA._toStore("MahalanobisConsistency"):
1757 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1758 if selfA._toStore("SimulationQuantiles"):
1759 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
HXa = numpy.matrix(numpy.ravel( HXa )).T
YfQ = None
for i in range(nech):
if selfA._parameters["SimulationForQuantiles"] == "Linear":
dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
Yr = HXa + dYr
elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
if YfQ is None:
YfQ = Yr
else:
YfQ = numpy.hstack((YfQ,Yr))
YfQ.sort(axis=-1)
YQ = None
for quantile in selfA._parameters["Quantiles"]:
if not (0. <= float(quantile) <= 1.): continue
indice = int(nech * float(quantile) - 1./nech)
if YQ is None: YQ = YfQ[:,indice]
else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1782 if selfA._toStore("SimulatedObservationAtBackground"):
1783 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1784 if selfA._toStore("SimulatedObservationAtOptimum"):
1785 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
return 0
1789 # ==============================================================================
1790 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
Stochastic EnKF (Evensen 1994, Burgers 1998)

selfA is identical to the "self" of the calling algorithm and carries its control information.
1797 if selfA._parameters["EstimationOf"] == "Parameters":
1798 selfA._parameters["StoreInternalVariables"] = True
1802 H = HO["Direct"].appliedControledFormTo
1804 if selfA._parameters["EstimationOf"] == "State":
1805 M = EM["Direct"].appliedControledFormTo
1807 if CM is not None and "Tangent" in CM and U is not None:
1808 Cm = CM["Tangent"].asMatrix(Xb)
else:
Cm = None
# Number of steps equal to the number of observation steps
# ---------------------------------------------------------
1814 if hasattr(Y,"stepnumber"):
1815 duration = Y.stepnumber()
1816 __p = numpy.cumprod(Y.shape())[-1]
else:
duration = 2
1819 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
# -----------------------------------------
1823 if selfA._parameters["StoreInternalVariables"] \
1824 or selfA._toStore("CostFunctionJ") \
1825 or selfA._toStore("CostFunctionJb") \
1826 or selfA._toStore("CostFunctionJo") \
1827 or selfA._toStore("CurrentOptimum") \
1828 or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
RI = R.getI()
# Initialization
# --------------
__n = Xb.size
1835 __m = selfA._parameters["NumberOfMembers"]
if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
else: Pn = B
if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
else: Rn = R
if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
else: Qn = Q
1842 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1844 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1845 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1846 if selfA._toStore("APosterioriCovariance"):
1847 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1850 previousJMinimum = numpy.finfo(float).max
1852 for step in range(duration-1):
1853 if hasattr(Y,"store"):
1854 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
else:
1856 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
if U is not None:
if hasattr(U,"store") and len(U)>1:
Un = numpy.asmatrix(numpy.ravel( U[step] )).T
elif hasattr(U,"store") and len(U)==1:
Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
Un = numpy.asmatrix(numpy.ravel( U )).T
else:
Un = None
1868 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1869 Xn = CovarianceInflation( Xn,
1870 selfA._parameters["InflationType"],
1871 selfA._parameters["InflationFactor"],
)
1874 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
EMX = M( [(Xn[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
1878 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1879 Xn_predicted = EMX + qi
HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, this term is duplicated!
1884 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1885 Xn_predicted = Xn_predicted + Cm * Un
1886 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > By principle, M = Id, Q = 0
Xn_predicted = Xn
HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
1893 # Mean of forecast and observation of forecast
1894 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1895 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1897 #--------------------------
1898 if VariantM == "KalmanFilterFormula05":
1899 PfHT, HPfHT = 0., 0.
1900 for i in range(__m):
1901 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1902 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1903 PfHT += Exfi * Eyfi.T
1904 HPfHT += Eyfi * Eyfi.T
1905 PfHT = (1./(__m-1)) * PfHT
1906 HPfHT = (1./(__m-1)) * HPfHT
1907 Kn = PfHT * ( R + HPfHT ).I
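# Kn is the ensemble Kalman gain built from the sampled forecast
# covariances: Kn = Pf H^T (R + H Pf H^T)^{-1}, with Pf H^T and H Pf H^T
# estimated from the state and observation anomalies of the members.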
1910 for i in range(__m):
1911 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1912 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
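# Stochastic (perturbed-observations) update: each member is corrected
# with its own observation noise draw ri ~ N(0, Rn), which keeps the
# analysis ensemble spread statistically consistent (Burgers et al. 1998).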
1913 #--------------------------
1914 elif VariantM == "KalmanFilterFormula16":
1915 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1916 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1918 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1919 EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1921 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
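# Equivalent anomaly-based form of the stochastic gain: the observation
# anomalies EaY include the centered perturbations EpY, so Kn estimates
# Pf H^T (H Pf H^T + R)^{-1} directly from the scaled ensembles.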
1923 for i in range(__m):
1924 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1925 #--------------------------
else:
1927 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1929 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1930 Xn = CovarianceInflation( Xn,
1931 selfA._parameters["InflationType"],
1932 selfA._parameters["InflationFactor"],
)
1935 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1936 #--------------------------
1938 if selfA._parameters["StoreInternalVariables"] \
1939 or selfA._toStore("CostFunctionJ") \
1940 or selfA._toStore("CostFunctionJb") \
1941 or selfA._toStore("CostFunctionJo") \
1942 or selfA._toStore("APosterioriCovariance") \
1943 or selfA._toStore("InnovationAtCurrentAnalysis") \
1944 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1945 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1946 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1947 _Innovation = Ynpu - _HXa
1949 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with analysis
1951 selfA.StoredVariables["Analysis"].store( Xa )
1952 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1953 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1954 if selfA._toStore("InnovationAtCurrentAnalysis"):
1955 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with current state
1957 if selfA._parameters["StoreInternalVariables"] \
1958 or selfA._toStore("CurrentState"):
1959 selfA.StoredVariables["CurrentState"].store( Xn )
1960 if selfA._toStore("ForecastState"):
1961 selfA.StoredVariables["ForecastState"].store( EMX )
1962 if selfA._toStore("BMA"):
1963 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1964 if selfA._toStore("InnovationAtCurrentState"):
1965 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1966 if selfA._toStore("SimulatedObservationAtCurrentState") \
1967 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1968 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1970 if selfA._parameters["StoreInternalVariables"] \
1971 or selfA._toStore("CostFunctionJ") \
1972 or selfA._toStore("CostFunctionJb") \
1973 or selfA._toStore("CostFunctionJo") \
1974 or selfA._toStore("CurrentOptimum") \
1975 or selfA._toStore("APosterioriCovariance"):
1976 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1977 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
1979 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1980 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1981 selfA.StoredVariables["CostFunctionJ" ].store( J )
1983 if selfA._toStore("IndexOfOptimum") \
1984 or selfA._toStore("CurrentOptimum") \
1985 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1986 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1987 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1988 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1989 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1990 if selfA._toStore("IndexOfOptimum"):
1991 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1992 if selfA._toStore("CurrentOptimum"):
1993 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1994 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1995 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1996 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1997 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1998 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1999 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2000 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2001 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2002 if selfA._toStore("APosterioriCovariance"):
2003 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
Pn = Eai @ Eai.T
2005 Pn = 0.5 * (Pn + Pn.T)
2006 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2007 if selfA._parameters["EstimationOf"] == "Parameters" \
2008 and J < previousJMinimum:
2009 previousJMinimum = J
XaMin = Xa
2011 if selfA._toStore("APosterioriCovariance"):
2012 covarianceXaMin = Pn
# Final extra storage of the optimum for parameter estimation
# ------------------------------------------------------------
2016 if selfA._parameters["EstimationOf"] == "Parameters":
2017 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2018 selfA.StoredVariables["Analysis"].store( XaMin )
2019 if selfA._toStore("APosterioriCovariance"):
2020 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2021 if selfA._toStore("BMA"):
2022 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
return 0
2026 # ==============================================================================
2027 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2029 Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
selfA is identical to the "self" of the calling algorithm and carries its control information.
2034 if selfA._parameters["EstimationOf"] == "Parameters":
2035 selfA._parameters["StoreInternalVariables"] = True
2039 H = HO["Direct"].appliedControledFormTo
2041 if selfA._parameters["EstimationOf"] == "State":
2042 M = EM["Direct"].appliedControledFormTo
2044 if CM is not None and "Tangent" in CM and U is not None:
2045 Cm = CM["Tangent"].asMatrix(Xb)
else:
Cm = None
# Number of steps equal to the number of observation steps
# ---------------------------------------------------------
2051 if hasattr(Y,"stepnumber"):
2052 duration = Y.stepnumber()
2053 __p = numpy.cumprod(Y.shape())[-1]
else:
duration = 2
2056 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
# -----------------------------------------
2060 if selfA._parameters["StoreInternalVariables"] \
2061 or selfA._toStore("CostFunctionJ") \
2062 or selfA._toStore("CostFunctionJb") \
2063 or selfA._toStore("CostFunctionJo") \
2064 or selfA._toStore("CurrentOptimum") \
2065 or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
RI = R.getI()
2068 elif VariantM != "KalmanFilterFormula":
RI = R.getI()
2070 if VariantM == "KalmanFilterFormula":
2071 RIdemi = R.choleskyI()
# Initialization
# --------------
__n = Xb.size
2076 __m = selfA._parameters["NumberOfMembers"]
if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
else: Pn = B
if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
else: Rn = R
if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
else: Qn = Q
2083 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2085 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2086 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2087 if selfA._toStore("APosterioriCovariance"):
2088 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2091 previousJMinimum = numpy.finfo(float).max
2093 for step in range(duration-1):
2094 if hasattr(Y,"store"):
2095 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
else:
2097 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
if U is not None:
if hasattr(U,"store") and len(U)>1:
Un = numpy.asmatrix(numpy.ravel( U[step] )).T
elif hasattr(U,"store") and len(U)==1:
Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
Un = numpy.asmatrix(numpy.ravel( U )).T
else:
Un = None
2109 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2110 Xn = CovarianceInflation( Xn,
2111 selfA._parameters["InflationType"],
2112 selfA._parameters["InflationFactor"],
)
2115 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
EMX = M( [(Xn[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2119 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2120 Xn_predicted = EMX + qi
HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, this term is duplicated!
2125 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2126 Xn_predicted = Xn_predicted + Cm * Un
2127 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > By principle, M = Id, Q = 0
Xn_predicted = Xn
HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2134 # Mean of forecast and observation of forecast
2135 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2136 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2139 EaX = EnsembleOfAnomalies( Xn_predicted )
2140 EaHX = numpy.array(HX_predicted - Hfm)
2142 #--------------------------
2143 if VariantM == "KalmanFilterFormula":
2144 mS = RIdemi * EaHX / numpy.sqrt(__m-1)
2145 delta = RIdemi * ( Ynpu - Hfm )
2146 mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2147 vw = mT @ mS.transpose() @ delta
2149 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
mU = numpy.identity(__m)
2152 EaX = EaX / numpy.sqrt(__m-1)
2153 Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
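# ETKF square-root update in ensemble space: with S = R^{-1/2} H X'_f
# (anomalies scaled by 1/sqrt(m-1)), the analysis weights are
# w = (I + S^T S)^{-1} S^T delta and the transform T^{1/2} =
# (I + S^T S)^{-1/2} rescales the anomalies, so no perturbed
# observations are needed (Bishop 2001, Hunt 2007).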
2154 #--------------------------
2155 elif VariantM == "Variational":
2156 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2157 def CostFunction(w):
2158 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2159 _Jo = 0.5 * _A.T @ (RI * _A)
2160 _Jb = 0.5 * (__m-1) * w.T @ w
_J = _Jo + _Jb
return float(_J)
2163 def GradientOfCostFunction(w):
2164 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = (__m-1) * w.reshape((__m,1))
_GradJ = _GradJo + _GradJb
2168 return numpy.ravel(_GradJ)
vw = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = numpy.zeros(__m),
fprime = GradientOfCostFunction,
args = (),
disp = False,
)
2177 Hto = EaHX.T @ (RI * EaHX)
Htb = (__m-1) * numpy.eye(__m)
Hta = Hto + Htb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2184 Xn = Xfm + EaX @ (vw[:,None] + EWa)
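# In this variational variant, the weights w minimize the ensemble-space
# cost J(w) = Jo(w) + (m-1)/2 w^T w; the analysis anomalies are then
# rescaled by the square root of the inverse of the cost Hessian
# Hta = Hto + Htb.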
2185 #--------------------------
2186 elif VariantM == "FiniteSize11": # Jauge Boc2011
2187 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2188 def CostFunction(w):
2189 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2190 _Jo = 0.5 * _A.T @ (RI * _A)
2191 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
2194 def GradientOfCostFunction(w):
2195 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
_GradJ = _GradJo + _GradJb
2199 return numpy.ravel(_GradJ)
vw = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = numpy.zeros(__m),
fprime = GradientOfCostFunction,
args = (),
disp = False,
)
2208 Hto = EaHX.T @ (RI * EaHX)
Htb = __m * \
( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
/ (1 + 1/__m + vw.T @ vw)**2
Hta = Hto + Htb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2217 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2218 #--------------------------
2219 elif VariantM == "FiniteSize15": # Jauge Boc2015
2220 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2221 def CostFunction(w):
2222 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
_Jo = 0.5 * _A.T @ (RI * _A)
_Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
2227 def GradientOfCostFunction(w):
2228 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
_GradJ = _GradJo + _GradJb
2232 return numpy.ravel(_GradJ)
vw = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = numpy.zeros(__m),
fprime = GradientOfCostFunction,
args = (),
disp = False,
)
2241 Hto = EaHX.T @ (RI * EaHX)
Htb = (__m+1) * \
( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
/ (1 + 1/__m + vw.T @ vw)**2
Hta = Hto + Htb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2250 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2251 #--------------------------
2252 elif VariantM == "FiniteSize16": # Jauge Boc2016
2253 HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
2254 def CostFunction(w):
2255 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2256 _Jo = 0.5 * _A.T @ (RI * _A)
2257 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
_J = _Jo + _Jb
return float(_J)
2260 def GradientOfCostFunction(w):
2261 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
_GradJo = - EaHX.T @ (RI * _A)
_GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
_GradJ = _GradJo + _GradJb
2265 return numpy.ravel(_GradJ)
vw = scipy.optimize.fmin_cg(
f = CostFunction,
x0 = numpy.zeros(__m),
fprime = GradientOfCostFunction,
args = (),
disp = False,
)
2274 Hto = EaHX.T @ (RI * EaHX)
2275 Htb = ((__m+1) / (__m-1)) * \
2276 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2277 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
Hta = Hto + Htb
Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2283 Xn = Xfm + EaX @ (vw[:,None] + EWa)
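# The "FiniteSize" variants implement the finite-size EnKF (EnKF-N) of
# Bocquet et al.: the Gaussian prior on the weights is replaced by a
# marginalized one, yielding the logarithmic Jb terms above in three
# gauges (Boc2011, Boc2015, Boc2016) intended to avoid manual inflation
# tuning.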
2284 #--------------------------
else:
2286 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2288 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2289 Xn = CovarianceInflation( Xn,
2290 selfA._parameters["InflationType"],
2291 selfA._parameters["InflationFactor"],
)
2294 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2295 #--------------------------
2297 if selfA._parameters["StoreInternalVariables"] \
2298 or selfA._toStore("CostFunctionJ") \
2299 or selfA._toStore("CostFunctionJb") \
2300 or selfA._toStore("CostFunctionJo") \
2301 or selfA._toStore("APosterioriCovariance") \
2302 or selfA._toStore("InnovationAtCurrentAnalysis") \
2303 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2304 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2305 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2306 _Innovation = Ynpu - _HXa
2308 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with analysis
2310 selfA.StoredVariables["Analysis"].store( Xa )
2311 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2312 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2313 if selfA._toStore("InnovationAtCurrentAnalysis"):
2314 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with current state
2316 if selfA._parameters["StoreInternalVariables"] \
2317 or selfA._toStore("CurrentState"):
2318 selfA.StoredVariables["CurrentState"].store( Xn )
2319 if selfA._toStore("ForecastState"):
2320 selfA.StoredVariables["ForecastState"].store( EMX )
2321 if selfA._toStore("BMA"):
2322 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2323 if selfA._toStore("InnovationAtCurrentState"):
2324 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2325 if selfA._toStore("SimulatedObservationAtCurrentState") \
2326 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2327 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2329 if selfA._parameters["StoreInternalVariables"] \
2330 or selfA._toStore("CostFunctionJ") \
2331 or selfA._toStore("CostFunctionJb") \
2332 or selfA._toStore("CostFunctionJo") \
2333 or selfA._toStore("CurrentOptimum") \
2334 or selfA._toStore("APosterioriCovariance"):
2335 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2336 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
2338 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2339 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2340 selfA.StoredVariables["CostFunctionJ" ].store( J )
2342 if selfA._toStore("IndexOfOptimum") \
2343 or selfA._toStore("CurrentOptimum") \
2344 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2345 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2346 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2347 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2348 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2349 if selfA._toStore("IndexOfOptimum"):
2350 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2351 if selfA._toStore("CurrentOptimum"):
2352 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2353 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2354 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2355 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2356 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2357 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2358 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2359 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2360 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2361 if selfA._toStore("APosterioriCovariance"):
2362 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
Pn = Eai @ Eai.T
2364 Pn = 0.5 * (Pn + Pn.T)
2365 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2366 if selfA._parameters["EstimationOf"] == "Parameters" \
2367 and J < previousJMinimum:
2368 previousJMinimum = J
XaMin = Xa
2370 if selfA._toStore("APosterioriCovariance"):
2371 covarianceXaMin = Pn
# Final extra storage of the optimum for parameter estimation
# ------------------------------------------------------------
2375 if selfA._parameters["EstimationOf"] == "Parameters":
2376 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2377 selfA.StoredVariables["Analysis"].store( XaMin )
2378 if selfA._toStore("APosterioriCovariance"):
2379 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2380 if selfA._toStore("BMA"):
2381 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
return 0
2385 # ==============================================================================
2386 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2387 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2389 Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
selfA is identical to the "self" of the calling algorithm and carries its control information.
2394 if selfA._parameters["EstimationOf"] == "Parameters":
2395 selfA._parameters["StoreInternalVariables"] = True
2399 H = HO["Direct"].appliedControledFormTo
2401 if selfA._parameters["EstimationOf"] == "State":
2402 M = EM["Direct"].appliedControledFormTo
2404 if CM is not None and "Tangent" in CM and U is not None:
2405 Cm = CM["Tangent"].asMatrix(Xb)
else:
Cm = None
# Number of steps equal to the number of observation steps
# ---------------------------------------------------------
2411 if hasattr(Y,"stepnumber"):
2412 duration = Y.stepnumber()
2413 __p = numpy.cumprod(Y.shape())[-1]
else:
duration = 2
2416 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
# -----------------------------------------
2420 if selfA._parameters["StoreInternalVariables"] \
2421 or selfA._toStore("CostFunctionJ") \
2422 or selfA._toStore("CostFunctionJb") \
2423 or selfA._toStore("CostFunctionJo") \
2424 or selfA._toStore("CurrentOptimum") \
2425 or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
RI = R.getI()
# Initialization
# --------------
__n = Xb.size
2432 __m = selfA._parameters["NumberOfMembers"]
if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
else: Pn = B
if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
else: Rn = R
if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
else: Qn = Q
2439 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2441 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2442 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2443 if selfA._toStore("APosterioriCovariance"):
2444 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2447 previousJMinimum = numpy.finfo(float).max
2449 for step in range(duration-1):
2450 if hasattr(Y,"store"):
2451 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
else:
2453 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
if U is not None:
if hasattr(U,"store") and len(U)>1:
Un = numpy.asmatrix(numpy.ravel( U[step] )).T
elif hasattr(U,"store") and len(U)==1:
Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
Un = numpy.asmatrix(numpy.ravel( U )).T
else:
Un = None
2465 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2466 Xn = CovarianceInflation( Xn,
2467 selfA._parameters["InflationType"],
2468 selfA._parameters["InflationFactor"],
)
2471 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
EMX = M( [(Xn[:,i], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2475 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2476 Xn_predicted = EMX + qi
if Cm is not None and Un is not None: # Caution: if Cm is also embedded in M, this term is duplicated!
2478 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2479 Xn_predicted = Xn_predicted + Cm * Un
2480 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > By principle, M = Id, Q = 0
Xn_predicted = Xn
2484 #--------------------------
2485 if VariantM == "MLEF13":
2486 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2487 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
Ua = numpy.identity(__m)
__j = 0
Deltaw = 1
if not BnotT:
Ta = numpy.identity(__m)
2493 vw = numpy.zeros(__m)
2494 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2495 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
if BnotT:
2498 E1 = vx1 + _epsilon * EaX
else:
2500 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2505 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
if BnotT:
2508 EaY = (HE2 - vy2) / _epsilon
else:
2510 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2512 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2513 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2514 Deltaw = - numpy.linalg.solve(mH,GradJ)
vw = vw + Deltaw
if not BnotT:
2519 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
__j = __j + 1
if BnotT:
2524 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2526 Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
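# MLEF performs a Gauss-Newton iteration on the ensemble weights w:
# GradJ is the gradient and mH = I + EaY^T R^{-1} EaY the Hessian of the
# nonlinear ensemble-space cost; at convergence the analysis anomalies
# are rescaled by Ta = mH^{-1/2} (Ua, the identity here, leaves room for
# an optional rotation of the analysis ensemble).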
2527 #--------------------------
else:
2529 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2531 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2532 Xn = CovarianceInflation( Xn,
2533 selfA._parameters["InflationType"],
2534 selfA._parameters["InflationFactor"],
2537 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2538 #--------------------------
2540 if selfA._parameters["StoreInternalVariables"] \
2541 or selfA._toStore("CostFunctionJ") \
2542 or selfA._toStore("CostFunctionJb") \
2543 or selfA._toStore("CostFunctionJo") \
2544 or selfA._toStore("APosterioriCovariance") \
2545 or selfA._toStore("InnovationAtCurrentAnalysis") \
2546 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2547 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2548 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2549 _Innovation = Ynpu - _HXa
2551 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with analysis
2553 selfA.StoredVariables["Analysis"].store( Xa )
2554 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2555 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2556 if selfA._toStore("InnovationAtCurrentAnalysis"):
2557 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with current state
2559 if selfA._parameters["StoreInternalVariables"] \
2560 or selfA._toStore("CurrentState"):
2561 selfA.StoredVariables["CurrentState"].store( Xn )
2562 if selfA._toStore("ForecastState"):
2563 selfA.StoredVariables["ForecastState"].store( EMX )
2564 if selfA._toStore("BMA"):
2565 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2566 if selfA._toStore("InnovationAtCurrentState"):
2567 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2568 if selfA._toStore("SimulatedObservationAtCurrentState") \
2569 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2572 if selfA._parameters["StoreInternalVariables"] \
2573 or selfA._toStore("CostFunctionJ") \
2574 or selfA._toStore("CostFunctionJb") \
2575 or selfA._toStore("CostFunctionJo") \
2576 or selfA._toStore("CurrentOptimum") \
2577 or selfA._toStore("APosterioriCovariance"):
2578 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2579 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
2581 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2582 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2583 selfA.StoredVariables["CostFunctionJ" ].store( J )
2585 if selfA._toStore("IndexOfOptimum") \
2586 or selfA._toStore("CurrentOptimum") \
2587 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2588 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2589 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2590 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2591 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2592 if selfA._toStore("IndexOfOptimum"):
2593 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2594 if selfA._toStore("CurrentOptimum"):
2595 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2596 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2598 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2599 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2600 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2601 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2602 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2603 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2604 if selfA._toStore("APosterioriCovariance"):
2605 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
Pn = Eai @ Eai.T
2607 Pn = 0.5 * (Pn + Pn.T)
2608 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2609 if selfA._parameters["EstimationOf"] == "Parameters" \
2610 and J < previousJMinimum:
2611 previousJMinimum = J
XaMin = Xa
2613 if selfA._toStore("APosterioriCovariance"):
2614 covarianceXaMin = Pn
# Final extra storage of the optimum for parameter estimation
# ------------------------------------------------------------
2618 if selfA._parameters["EstimationOf"] == "Parameters":
2619 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2620 selfA.StoredVariables["Analysis"].store( XaMin )
2621 if selfA._toStore("APosterioriCovariance"):
2622 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2623 if selfA._toStore("BMA"):
2624 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
return 0
2628 # ==============================================================================
2629 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2630 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2632 Iterative EnKF (Sakov 2012, Sakov 2018)
selfA is identical to the "self" of the calling algorithm and carries its control information.
2637 if selfA._parameters["EstimationOf"] == "Parameters":
2638 selfA._parameters["StoreInternalVariables"] = True
2642 H = HO["Direct"].appliedControledFormTo
2644 if selfA._parameters["EstimationOf"] == "State":
2645 M = EM["Direct"].appliedControledFormTo
2647 if CM is not None and "Tangent" in CM and U is not None:
2648 Cm = CM["Tangent"].asMatrix(Xb)
else:
Cm = None
# Number of steps equal to the number of observation steps
# ---------------------------------------------------------
2654 if hasattr(Y,"stepnumber"):
2655 duration = Y.stepnumber()
2656 __p = numpy.cumprod(Y.shape())[-1]
else:
duration = 2
2659 __p = numpy.array(Y).size
# Precomputation of the inverses of B and R
# -----------------------------------------
2663 if selfA._parameters["StoreInternalVariables"] \
2664 or selfA._toStore("CostFunctionJ") \
2665 or selfA._toStore("CostFunctionJb") \
2666 or selfA._toStore("CostFunctionJo") \
2667 or selfA._toStore("CurrentOptimum") \
2668 or selfA._toStore("APosterioriCovariance"):
BI = B.getI()
RI = R.getI()
# Initialization
# --------------
__n = Xb.size
2675 __m = selfA._parameters["NumberOfMembers"]
if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
else: Pn = B
if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
else: Rn = R
if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
else: Qn = Q
2682 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2684 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2685 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2686 if selfA._toStore("APosterioriCovariance"):
2687 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2690 previousJMinimum = numpy.finfo(float).max
2692 for step in range(duration-1):
2693 if hasattr(Y,"store"):
2694 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
else:
2696 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
if U is not None:
if hasattr(U,"store") and len(U)>1:
Un = numpy.asmatrix(numpy.ravel( U[step] )).T
elif hasattr(U,"store") and len(U)==1:
Un = numpy.asmatrix(numpy.ravel( U[0] )).T
else:
Un = numpy.asmatrix(numpy.ravel( U )).T
else:
Un = None
2708 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2709 Xn = CovarianceInflation( Xn,
2710 selfA._parameters["InflationType"],
2711 selfA._parameters["InflationFactor"],
)
2714 #--------------------------
2715 if VariantM == "IEnKF12":
2716 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2717 EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
__j = 0
Deltaw = 1
if not BnotT:
Ta = numpy.identity(__m)
2722 vw = numpy.zeros(__m)
2723 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2724 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
if BnotT:
2727 E1 = vx1 + _epsilon * EaX
else:
2729 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2731 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2735 elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > By principle, M = Id
E2 = Xn
2738 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2739 vy1 = H((vx2, Un)).reshape((__p,-1))
HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
returnSerieAsArrayMatrix = True )
2744 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
if BnotT:
2747 EaY = (HE2 - vy2) / _epsilon
else:
2749 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2751 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2752 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2753 Deltaw = - numpy.linalg.solve(mH,GradJ)
vw = vw + Deltaw
if not BnotT:
2758 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
__j = __j + 1
2762 A2 = EnsembleOfAnomalies( E2 )
if BnotT:
2765 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
Xn = vx2 + A2
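# IEnKF iterates the ensemble update through the full nonlinear model
# within each assimilation window (Sakov 2012); BnotT selects the
# "bundle" flavor (finite differences with step _epsilon) instead of the
# "transform" flavor (rescaling by Ta) of the iteration.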
2769 #--------------------------
else:
2771 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2773 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2774 Xn = CovarianceInflation( Xn,
2775 selfA._parameters["InflationType"],
2776 selfA._parameters["InflationFactor"],
)
2779 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2780 #--------------------------
2782 if selfA._parameters["StoreInternalVariables"] \
2783 or selfA._toStore("CostFunctionJ") \
2784 or selfA._toStore("CostFunctionJb") \
2785 or selfA._toStore("CostFunctionJo") \
2786 or selfA._toStore("APosterioriCovariance") \
2787 or selfA._toStore("InnovationAtCurrentAnalysis") \
2788 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2789 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2790 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2791 _Innovation = Ynpu - _HXa
2793 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with analysis
2795 selfA.StoredVariables["Analysis"].store( Xa )
2796 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2797 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2798 if selfA._toStore("InnovationAtCurrentAnalysis"):
2799 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with current state
2801 if selfA._parameters["StoreInternalVariables"] \
2802 or selfA._toStore("CurrentState"):
2803 selfA.StoredVariables["CurrentState"].store( Xn )
2804 if selfA._toStore("ForecastState"):
2805 selfA.StoredVariables["ForecastState"].store( E2 )
2806 if selfA._toStore("BMA"):
2807 selfA.StoredVariables["BMA"].store( E2 - Xa )
2808 if selfA._toStore("InnovationAtCurrentState"):
2809 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2810 if selfA._toStore("SimulatedObservationAtCurrentState") \
2811 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2812 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2814 if selfA._parameters["StoreInternalVariables"] \
2815 or selfA._toStore("CostFunctionJ") \
2816 or selfA._toStore("CostFunctionJb") \
2817 or selfA._toStore("CostFunctionJo") \
2818 or selfA._toStore("CurrentOptimum") \
2819 or selfA._toStore("APosterioriCovariance"):
2820 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2821 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
2823 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2824 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2825 selfA.StoredVariables["CostFunctionJ" ].store( J )
2827 if selfA._toStore("IndexOfOptimum") \
2828 or selfA._toStore("CurrentOptimum") \
2829 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2830 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2831 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2832 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2833 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2834 if selfA._toStore("IndexOfOptimum"):
2835 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2836 if selfA._toStore("CurrentOptimum"):
2837 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2838 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2839 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2840 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2841 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2842 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2843 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2844 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2845 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2846 if selfA._toStore("APosterioriCovariance"):
2847 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
Pn = Eai @ Eai.T
2849 Pn = 0.5 * (Pn + Pn.T)
2850 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2851 if selfA._parameters["EstimationOf"] == "Parameters" \
2852 and J < previousJMinimum:
2853 previousJMinimum = J
XaMin = Xa
2855 if selfA._toStore("APosterioriCovariance"):
2856 covarianceXaMin = Pn
# Final extra storage of the optimum for parameter estimation
# ------------------------------------------------------------
2860 if selfA._parameters["EstimationOf"] == "Parameters":
2861 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2862 selfA.StoredVariables["Analysis"].store( XaMin )
2863 if selfA._toStore("APosterioriCovariance"):
2864 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2865 if selfA._toStore("BMA"):
2866 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
return 0
2870 # ==============================================================================
2871 if __name__ == "__main__":
2872 print('\n AUTODIAGNOSTIC\n')