1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Defines the generic numerical objects.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( paire ):
38 assert len(paire) == 2, "Incorrect number of arguments"
39 X, funcrepr = paire
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 __HX = __fonction( __X )
46 return numpy.ravel( __HX )
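# Hedged usage sketch (not part of the original module): ExecuteFunction expects a
# pair (X, funcrepr), where funcrepr gives the path, module name and function name of
# a user-defined function to import and apply to X. The module name "my_user_model"
# and the temporary directory below are illustrative assumptions only.
def _exampleExecuteFunctionUsage():
    import os, tempfile
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "my_user_model.py"), "w") as f:
        f.write("import numpy\ndef H(x):\n    return 2.0 * numpy.ravel(x)\n")
    funcrepr = {
        "__userFunction__path" : tmpdir,           # directory inserted into sys.path
        "__userFunction__modl" : "my_user_model",  # module imported by ExecuteFunction
        "__userFunction__name" : "H",              # function looked up in that module
        }
    return ExecuteFunction( ([1., 2., 3.], funcrepr) )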
48 # ==============================================================================
49 class FDApproximation(object):
51 This class serves as an interface to define approximated operators. When
52 creating an object by providing a function "Function", one obtains an object
53 with three methods: "DirectOperator", "TangentOperator" and "AdjointOperator".
54 The finite-difference (FD) approximation is controlled by the multiplicative
55 increment "increment", 1% by default, or by the fixed increment "dX" which is
56 then multiplied by "increment" (hence in %), and centered finite differences
57 are used if the boolean "centeredDF" is true.
60 name = "FDApproximation",
65 avoidingRedundancy = True,
66 toleranceInRedundancy = 1.e-18,
67 lenghtOfRedundancy = -1,
72 self.__name = str(name)
75 import multiprocessing
76 self.__mpEnabled = True
78 self.__mpEnabled = False
80 self.__mpEnabled = False
81 self.__mpWorkers = mpWorkers
82 if self.__mpWorkers is not None and self.__mpWorkers < 1:
83 self.__mpWorkers = None
84 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
87 self.__mfEnabled = True
89 self.__mfEnabled = False
90 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
92 if avoidingRedundancy:
94 self.__tolerBP = float(toleranceInRedundancy)
95 self.__lenghtRJ = int(lenghtOfRedundancy)
96 self.__listJPCP = [] # Jacobian Previous Calculated Points
97 self.__listJPCI = [] # Jacobian Previous Calculated Increment
98 self.__listJPCR = [] # Jacobian Previous Calculated Results
99 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
102 self.__avoidRC = False
105 if isinstance(Function,types.FunctionType):
106 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107 self.__userFunction__name = Function.__name__
109 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
111 mod = os.path.abspath(Function.__globals__['__file__'])
112 if not os.path.isfile(mod):
113 raise ImportError("No user defined function or method found with the name %s"%(mod,))
114 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115 self.__userFunction__path = os.path.dirname(mod)
117 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
118 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
119 elif isinstance(Function,types.MethodType):
120 logging.debug("FDA Calculs en multiprocessing : MethodType")
121 self.__userFunction__name = Function.__name__
123 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
125 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126 if not os.path.isfile(mod):
127 raise ImportError("No user defined function or method found with the name %s"%(mod,))
128 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129 self.__userFunction__path = os.path.dirname(mod)
131 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
132 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
134 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137 self.__userFunction = self.__userOperator.appliedTo
139 self.__centeredDF = bool(centeredDF)
140 if abs(float(increment)) > 1.e-15:
141 self.__increment = float(increment)
143 self.__increment = 0.01
147 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
148 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
150 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
152 # ---------------------------------------------------------
153 def __doublon__(self, e, l, n, v=None):
154 __ac, __iac = False, -1
155 for i in range(len(l)-1,-1,-1):
156 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157 __ac, __iac = True, i
158 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
162 # ---------------------------------------------------------
163 def DirectOperator(self, X ):
165 Computation of the direct operator with the provided function.
167 logging.debug("FDA Calcul DirectOperator (explicite)")
169 _HX = self.__userFunction( X, argsAsSerie = True )
171 _X = numpy.asmatrix(numpy.ravel( X )).T
172 _HX = numpy.ravel(self.__userFunction( _X ))
176 # ---------------------------------------------------------
177 def TangentMatrix(self, X ):
179 Computation of the tangent operator as the Jacobian by finite differences,
180 i.e. the gradient of H at X. Directional finite differences are used
181 around the point X. X is a numpy.matrix.
183 Centered finite differences (second-order approximation):
184 1/ For each component i of X, the perturbation dX[i] is added to and
185 subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
186 and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
188 2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
190 3/ Each result, component by component, becomes a column of the Jacobian
192 Non-centered finite differences (first-order approximation):
193 1/ For each component i of X, the perturbation dX[i] is added to the
194 component X[i] to build X_plus_dXi, and the response
195 HX_plus_dXi = H( X_plus_dXi ) is computed
196 2/ The central value HX = H(X) is computed
197 3/ The differences (HX_plus_dXi-HX) are formed and divided by
199 4/ Each result, component by component, becomes a column of the Jacobian
202 logging.debug("FDA Début du calcul de la Jacobienne")
203 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
204 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
206 if X is None or len(X)==0:
207 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
209 _X = numpy.asmatrix(numpy.ravel( X )).T
211 if self.__dX is None:
212 _dX = self.__increment * _X
214 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
216 if (_dX == 0.).any():
219 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
221 _dX = numpy.where( _dX == 0., moyenne, _dX )
223 __alreadyCalculated = False
225 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
226 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228 __alreadyCalculated, __i = True, __alreadyCalculatedP
229 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
231 if __alreadyCalculated:
232 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
233 _Jacobienne = self.__listJPCR[__i]
235 logging.debug("FDA Calcul Jacobienne (explicite)")
236 if self.__centeredDF:
238 if self.__mpEnabled and not self.__mfEnabled:
240 "__userFunction__path" : self.__userFunction__path,
241 "__userFunction__modl" : self.__userFunction__modl,
242 "__userFunction__name" : self.__userFunction__name,
245 for i in range( len(_dX) ):
247 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
248 _X_plus_dXi[i] = _X[i] + _dXi
249 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
250 _X_moins_dXi[i] = _X[i] - _dXi
252 _jobs.append( (_X_plus_dXi, funcrepr) )
253 _jobs.append( (_X_moins_dXi, funcrepr) )
255 import multiprocessing
256 self.__pool = multiprocessing.Pool(self.__mpWorkers)
257 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
262 for i in range( len(_dX) ):
263 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
265 elif self.__mfEnabled:
267 for i in range( len(_dX) ):
269 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
270 _X_plus_dXi[i] = _X[i] + _dXi
271 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
272 _X_moins_dXi[i] = _X[i] - _dXi
274 _xserie.append( _X_plus_dXi )
275 _xserie.append( _X_moins_dXi )
277 _HX_plusmoins_dX = self.DirectOperator( _xserie )
280 for i in range( len(_dX) ):
281 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
285 for i in range( _dX.size ):
287 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
288 _X_plus_dXi[i] = _X[i] + _dXi
289 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
290 _X_moins_dXi[i] = _X[i] - _dXi
292 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
293 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
295 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
299 if self.__mpEnabled and not self.__mfEnabled:
301 "__userFunction__path" : self.__userFunction__path,
302 "__userFunction__modl" : self.__userFunction__modl,
303 "__userFunction__name" : self.__userFunction__name,
306 _jobs.append( (_X.A1, funcrepr) )
307 for i in range( len(_dX) ):
308 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
309 _X_plus_dXi[i] = _X[i] + _dX[i]
311 _jobs.append( (_X_plus_dXi, funcrepr) )
313 import multiprocessing
314 self.__pool = multiprocessing.Pool(self.__mpWorkers)
315 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
319 _HX = _HX_plus_dX.pop(0)
322 for i in range( len(_dX) ):
323 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
325 elif self.__mfEnabled:
327 _xserie.append( _X.A1 )
328 for i in range( len(_dX) ):
329 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
330 _X_plus_dXi[i] = _X[i] + _dX[i]
332 _xserie.append( _X_plus_dXi )
334 _HX_plus_dX = self.DirectOperator( _xserie )
336 _HX = _HX_plus_dX.pop(0)
339 for i in range( len(_dX) ):
340 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
344 _HX = self.DirectOperator( _X )
345 for i in range( _dX.size ):
347 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
348 _X_plus_dXi[i] = _X[i] + _dXi
350 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
352 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
355 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
357 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358 while len(self.__listJPCP) > self.__lenghtRJ:
359 self.__listJPCP.pop(0)
360 self.__listJPCI.pop(0)
361 self.__listJPCR.pop(0)
362 self.__listJPPN.pop(0)
363 self.__listJPIN.pop(0)
364 self.__listJPCP.append( copy.copy(_X) )
365 self.__listJPCI.append( copy.copy(_dX) )
366 self.__listJPCR.append( copy.copy(_Jacobienne) )
367 self.__listJPPN.append( numpy.linalg.norm(_X) )
368 self.__listJPIN.append( numpy.linalg.norm(_dX) )
370 logging.debug("FDA Fin du calcul de la Jacobienne")
374 # ---------------------------------------------------------
375 def TangentOperator(self, paire ):
377 Computation of the tangent operator using the Jacobian.
380 assert len(paire) == 1, "Incorrect length of arguments"
382 assert len(_paire) == 2, "Incorrect number of arguments"
384 assert len(paire) == 2, "Incorrect number of arguments"
387 _Jacobienne = self.TangentMatrix( X )
388 if dX is None or len(dX) == 0:
390 # Computation of the matrix form if the second argument is None
391 # -------------------------------------------------------------
392 if self.__mfEnabled: return [_Jacobienne,]
393 else: return _Jacobienne
396 # Computation of the linearized value of H at X applied to dX
397 # ------------------------------------------------------
398 _dX = numpy.asmatrix(numpy.ravel( dX )).T
399 _HtX = numpy.dot(_Jacobienne, _dX)
400 if self.__mfEnabled: return [_HtX.A1,]
403 # ---------------------------------------------------------
404 def AdjointOperator(self, paire ):
406 Computation of the adjoint operator using the Jacobian.
409 assert len(paire) == 1, "Incorrect length of arguments"
411 assert len(_paire) == 2, "Incorrect number of arguments"
413 assert len(paire) == 2, "Incorrect number of arguments"
416 _JacobienneT = self.TangentMatrix( X ).T
417 if Y is None or len(Y) == 0:
419 # Computation of the matrix form if the second argument is None
420 # -------------------------------------------------------------
421 if self.__mfEnabled: return [_JacobienneT,]
422 else: return _JacobienneT
425 # Computation of the adjoint value at X applied to Y
426 # --------------------------------------------------
427 _Y = numpy.asmatrix(numpy.ravel( Y )).T
428 _HaY = numpy.dot(_JacobienneT, _Y)
429 if self.__mfEnabled: return [_HaY.A1,]
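# Hedged usage sketch (illustrative only): finite-difference approximation of the
# Jacobian of a simple function. Only the constructor keywords documented above
# ("name", "Function", "centeredDF", "increment") are used; everything else keeps
# its default value, which is an assumption of this sketch.
def _exampleFDApproximationUsage():
    def H(x):
        x = numpy.ravel(x)
        return numpy.array([x[0]**2, x[0]*x[1]])
    FDA  = FDApproximation( name="SketchH", Function=H, centeredDF=True, increment=0.01 )
    X0   = [1., 2.]
    HX   = FDA.DirectOperator( X0 )              # H(X0)
    Jac  = FDA.TangentMatrix( X0 )               # finite-difference Jacobian at X0
    HtdX = FDA.TangentOperator( (X0, [1., 0.]) ) # Jacobian applied to a direction dX
    HaY  = FDA.AdjointOperator( (X0, [1., 1.]) ) # transposed Jacobian applied to Y
    return HX, Jac, HtdX, HaY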
432 # ==============================================================================
444 Computer implementation of the MMQR algorithm, based on the publication:
445 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
446 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
449 # Retrieval of the initial data and information
450 # --------------------------------------------------
451 variables = numpy.ravel( x0 )
452 mesures = numpy.ravel( y )
453 increment = sys.float_info[0]
456 quantile = float(quantile)
458 # Computation of the MM parameters
459 # ---------------------------
460 tn = float(toler) / n
461 e0 = -tn / math.log(tn)
462 epsilon = (e0-tn)/(1+math.log(e0))
464 # Initialization computations
465 # ------------------------
466 residus = mesures - numpy.ravel( func( variables ) )
467 poids = 1./(epsilon+numpy.abs(residus))
468 veps = 1. - 2. * quantile - residus * poids
469 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
472 # Iterative search
473 # -------------------
474 while (increment > toler) and (iteration < maxfun) :
477 Derivees = numpy.array(fprime(variables))
478 Derivees = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
479 DeriveesT = Derivees.transpose()
480 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
482 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
484 variables = variables + step
485 if bounds is not None:
486 # Caution: avoid an infinite loop if an interval is too small
487 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
489 variables = variables - step
490 residus = mesures - numpy.ravel( func(variables) )
491 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
493 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
495 variables = variables - step
496 residus = mesures - numpy.ravel( func(variables) )
497 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
499 increment = lastsurrogate-surrogate
500 poids = 1./(epsilon+numpy.abs(residus))
501 veps = 1. - 2. * quantile - residus * poids
502 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
506 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
508 return variables, Ecart, [n,p,iteration,increment,0]
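# Illustrative helper (not part of the original API): the quantity "Ecart" returned by
# mmqr above is the quantile (check/pinball) loss of the final residuals,
#   rho_tau(r) = tau * r if r >= 0, and (tau - 1) * r otherwise,
# summed over r = y - f(x). A direct numpy evaluation, for comparison purposes:
def _quantileCheckLoss(residus, quantile=0.5):
    residus = numpy.ravel(residus)
    return quantile * numpy.sum(residus) - numpy.sum(residus[residus < 0])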
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
514 _bgcenter = numpy.ravel(_bgcenter)[:,None]
516 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
518 if _bgcovariance is None:
519 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
521 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
524 return BackgroundEnsemble
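# Hedged usage sketch: draw an ensemble of random states centered on a background Xb
# with covariance B, one member per column, following the convention used above.
def _exampleCenteredPerturbations():
    Xb = numpy.array([0., 1., 2.])
    B  = 0.25 * numpy.eye(3)
    return EnsembleOfCenteredPerturbations( Xb, B, 10 )  # shape (3, 10)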
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
529 def __CenteredRandomAnomalies(Zr, N):
531 Generates a matrix of N random anomalies centered on Zr, following the
532 handwritten notes of MB and consistent with the code of PS with eps = -1
534 eps = -1
535 Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
542 _bgcenter = numpy.ravel(_bgcenter)[:,None]
544 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
545 if _bgcovariance is None:
546 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
549 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550 _nbctl = _bgcenter.size
551 if _nbmembers > _nbctl:
552 _Z = numpy.concatenate((numpy.dot(
553 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
556 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558 BackgroundEnsemble = _bgcenter + _Zca
560 if max(abs(_bgcovariance.flatten())) > 0:
561 _nbctl = _bgcenter.size
562 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564 BackgroundEnsemble = _bgcenter + _Zca
566 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
568 return BackgroundEnsemble
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572 "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
574 Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
576 Em = numpy.ravel(_optmean)[:,numpy.newaxis]
578 return numpy.asarray(_ensemble) - Em
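# Small illustrative check: the anomalies returned by EnsembleOfAnomalies have zero
# mean over the members (axis 1), up to round-off, whatever the input ensemble is.
def _exampleEnsembleAnomalies():
    E  = numpy.random.randn(4, 7)     # 4 state variables, 7 members
    An = EnsembleOfAnomalies( E )     # same shape, centered over the members
    return numpy.allclose( An.mean(axis=1), 0. )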
580 # ==============================================================================
581 def CovarianceInflation(
583 InflationType = None,
584 InflationFactor = None,
585 BackgroundCov = None,
588 Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
590 Summary: Hunt 2007, section 2.3.5
592 if InflationFactor is None:
595 InflationFactor = float(InflationFactor)
597 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598 if InflationFactor < 1.:
599 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
600 if InflationFactor < 1.+mpr:
602 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
604 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605 if InflationFactor < 1.:
606 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
607 if InflationFactor < 1.+mpr:
609 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
613 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614 if InflationFactor < 0.:
615 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
616 if InflationFactor < mpr:
618 __n, __m = numpy.asarray(InputCovOrEns).shape
620 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
621 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
623 elif InflationType == "HybridOnBackgroundCovariance":
624 if InflationFactor < 0.:
625 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
626 if InflationFactor < mpr:
628 __n, __m = numpy.asarray(InputCovOrEns).shape
630 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
631 if BackgroundCov is None:
632 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633 if InputCovOrEns.shape != BackgroundCov.shape:
634 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
635 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
637 elif InflationType == "Relaxation":
638 raise NotImplementedError("InflationType Relaxation")
641 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
643 return OutputCovOrEns
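# Hedged usage sketch: multiplicative inflation of an analysis covariance matrix, as
# handled above (for a factor above 1 the output is InflationFactor**2 times the input).
def _exampleMultiplicativeInflation():
    Pa = numpy.diag([1., 2., 3.])
    return CovarianceInflation(
        Pa,
        InflationType   = "MultiplicativeOnAnalysisCovariance",
        InflationFactor = 1.1,
        )                                # equals 1.21 * Pa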
645 # ==============================================================================
646 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
648 3DVAR (Bouttier 1999, Courtier 1993)
650 selfA is identical to the "self" of the calling algorithm and contains the
654 # Workaround for a TNC bug on the returned Minimum
655 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
656 selfA.setParameterValue("StoreInternalVariables",True)
660 Hm = HO["Direct"].appliedTo
661 Ha = HO["Adjoint"].appliedInXTo
663 # Possible use of a precomputed H(Xb) vector
664 # ----------------------------------------------------
665 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
666 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
669 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
670 if Y.size != HXb.size:
671 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
672 if max(Y.shape) != max(HXb.shape):
673 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
675 if selfA._toStore("JacobianMatrixAtBackground"):
676 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
677 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
678 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
680 # Precomputation of the inverses of B and R
681 # ----------------------------------
685 # Starting point of the optimization
686 # ------------------------------------
687 if selfA._parameters["InitializationPoint"] is not None:
688 Xini = numpy.ravel(selfA._parameters["InitializationPoint"])
689 if Xini.size != numpy.ravel(Xb).size:
690 raise ValueError("Incompatible size %i of forced initial point to replace the Xb of size %i" \
691 %(Xini.size,numpy.ravel(Xb).size))
693 Xini = numpy.ravel(Xb)
695 # Definition of the cost function
696 # ------------------------------
698 _X = numpy.asmatrix(numpy.ravel( x )).T
699 if selfA._parameters["StoreInternalVariables"] or \
700 selfA._toStore("CurrentState") or \
701 selfA._toStore("CurrentOptimum"):
702 selfA.StoredVariables["CurrentState"].store( _X )
704 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
705 _Innovation = Y - _HX
706 if selfA._toStore("SimulatedObservationAtCurrentState") or \
707 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
708 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
709 if selfA._toStore("InnovationAtCurrentState"):
710 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
712 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
713 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
716 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
717 selfA.StoredVariables["CostFunctionJb"].store( Jb )
718 selfA.StoredVariables["CostFunctionJo"].store( Jo )
719 selfA.StoredVariables["CostFunctionJ" ].store( J )
720 if selfA._toStore("IndexOfOptimum") or \
721 selfA._toStore("CurrentOptimum") or \
722 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
723 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
724 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
725 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
726 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
727 if selfA._toStore("IndexOfOptimum"):
728 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
729 if selfA._toStore("CurrentOptimum"):
730 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
731 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
732 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
733 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
734 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
735 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
736 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
737 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
738 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
741 def GradientOfCostFunction(x):
742 _X = numpy.asmatrix(numpy.ravel( x )).T
744 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
745 GradJb = BI * (_X - Xb)
746 GradJo = - Ha( (_X, RI * (Y - _HX)) )
747 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
750 # Minimization of the functional
751 # --------------------------------
752 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
754 if selfA._parameters["Minimizer"] == "LBFGSB":
755 if "0.19" <= scipy.version.version <= "1.1.0":
756 import lbfgsbhlt as optimiseur
758 import scipy.optimize as optimiseur
759 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
762 fprime = GradientOfCostFunction,
764 bounds = selfA._parameters["Bounds"],
765 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
766 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
767 pgtol = selfA._parameters["ProjectedGradientTolerance"],
768 iprint = selfA._parameters["optiprint"],
770 nfeval = Informations['funcalls']
771 rc = Informations['warnflag']
772 elif selfA._parameters["Minimizer"] == "TNC":
773 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
776 fprime = GradientOfCostFunction,
778 bounds = selfA._parameters["Bounds"],
779 maxfun = selfA._parameters["MaximumNumberOfSteps"],
780 pgtol = selfA._parameters["ProjectedGradientTolerance"],
781 ftol = selfA._parameters["CostDecrementTolerance"],
782 messages = selfA._parameters["optmessages"],
784 elif selfA._parameters["Minimizer"] == "CG":
785 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
788 fprime = GradientOfCostFunction,
790 maxiter = selfA._parameters["MaximumNumberOfSteps"],
791 gtol = selfA._parameters["GradientNormTolerance"],
792 disp = selfA._parameters["optdisp"],
795 elif selfA._parameters["Minimizer"] == "NCG":
796 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
799 fprime = GradientOfCostFunction,
801 maxiter = selfA._parameters["MaximumNumberOfSteps"],
802 avextol = selfA._parameters["CostDecrementTolerance"],
803 disp = selfA._parameters["optdisp"],
806 elif selfA._parameters["Minimizer"] == "BFGS":
807 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
810 fprime = GradientOfCostFunction,
812 maxiter = selfA._parameters["MaximumNumberOfSteps"],
813 gtol = selfA._parameters["GradientNormTolerance"],
814 disp = selfA._parameters["optdisp"],
818 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
820 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
821 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
823 # Workaround for a TNC bug on the returned Minimum
824 # ----------------------------------------------------------------
825 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
826 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
828 # Retrieval of the analysis
829 # ----------------------
830 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
832 selfA.StoredVariables["Analysis"].store( Xa.A1 )
834 if selfA._toStore("OMA") or \
835 selfA._toStore("SigmaObs2") or \
836 selfA._toStore("SimulationQuantiles") or \
837 selfA._toStore("SimulatedObservationAtOptimum"):
838 if selfA._toStore("SimulatedObservationAtCurrentState"):
839 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
840 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
841 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
845 # Computation of the analysis covariance
846 # ---------------------------------
847 if selfA._toStore("APosterioriCovariance") or \
848 selfA._toStore("SimulationQuantiles") or \
849 selfA._toStore("JacobianMatrixAtOptimum") or \
850 selfA._toStore("KalmanGainAtOptimum"):
851 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
852 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
853 if selfA._toStore("APosterioriCovariance") or \
854 selfA._toStore("SimulationQuantiles") or \
855 selfA._toStore("KalmanGainAtOptimum"):
856 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
857 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
858 if selfA._toStore("APosterioriCovariance") or \
859 selfA._toStore("SimulationQuantiles"):
863 _ee = numpy.matrix(numpy.zeros(nb)).T
865 _HtEE = numpy.dot(HtM,_ee)
866 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
867 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
868 HessienneI = numpy.matrix( HessienneI )
870 if min(A.shape) != max(A.shape):
871 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
872 if (numpy.diag(A) < 0).any():
873 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
874 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
876 L = numpy.linalg.cholesky( A )
878 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
879 if selfA._toStore("APosterioriCovariance"):
880 selfA.StoredVariables["APosterioriCovariance"].store( A )
881 if selfA._toStore("JacobianMatrixAtOptimum"):
882 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
883 if selfA._toStore("KalmanGainAtOptimum"):
884 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
885 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
886 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
888 # Additional computations and/or storage
889 # ---------------------------------------
890 if selfA._toStore("Innovation") or \
891 selfA._toStore("SigmaObs2") or \
892 selfA._toStore("MahalanobisConsistency") or \
893 selfA._toStore("OMB"):
895 if selfA._toStore("Innovation"):
896 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
897 if selfA._toStore("BMA"):
898 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
899 if selfA._toStore("OMA"):
900 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
901 if selfA._toStore("OMB"):
902 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
903 if selfA._toStore("SigmaObs2"):
904 TraceR = R.trace(Y.size)
905 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
906 if selfA._toStore("MahalanobisConsistency"):
907 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
908 if selfA._toStore("SimulationQuantiles"):
909 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
910 HXa = numpy.matrix(numpy.ravel( HXa )).T
912 for i in range(nech):
913 if selfA._parameters["SimulationForQuantiles"] == "Linear":
914 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
915 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
917 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
918 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
919 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
923 YfQ = numpy.hstack((YfQ,Yr))
926 for quantile in selfA._parameters["Quantiles"]:
927 if not (0. <= float(quantile) <= 1.): continue
928 indice = int(nech * float(quantile) - 1./nech)
929 if YQ is None: YQ = YfQ[:,indice]
930 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
931 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
932 if selfA._toStore("SimulatedObservationAtBackground"):
933 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
934 if selfA._toStore("SimulatedObservationAtOptimum"):
935 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
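# Hedged standalone sketch (independent of the ADAO objects used above): the 3DVAR cost
# function and gradient minimized by std3dvar, written for a linear observation operator
# given as a matrix H. All names are local to this sketch.
def _sketch3DVarCostAndGradient(x, Xb, Y, H, BI, RI):
    # J(x)     = 1/2 (x-Xb)^T B^{-1} (x-Xb) + 1/2 (Y-Hx)^T R^{-1} (Y-Hx)
    # GradJ(x) = B^{-1} (x-Xb) - H^T R^{-1} (Y-Hx)
    x, Xb, Y = (numpy.ravel(v) for v in (x, Xb, Y))
    innovation = Y - H @ x
    J     = 0.5 * (x - Xb) @ BI @ (x - Xb) + 0.5 * innovation @ RI @ innovation
    gradJ = BI @ (x - Xb) - H.T @ RI @ innovation
    return float(J), gradJ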
939 # ==============================================================================
940 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
942 3DVAR variational analysis with no inversion of B (Huang 2000)
944 selfA is identical to the "self" of the calling algorithm and contains the
948 # Workaround for a TNC bug on the returned Minimum
949 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
950 selfA.setParameterValue("StoreInternalVariables",True)
954 Hm = HO["Direct"].appliedTo
955 Ha = HO["Adjoint"].appliedInXTo
957 # Precomputation of the inverses of B and R
961 # Starting point of the optimization
962 Xini = numpy.zeros(Xb.shape)
964 # Definition of the cost function
965 # ------------------------------
967 _V = numpy.asmatrix(numpy.ravel( v )).T
969 if selfA._parameters["StoreInternalVariables"] or \
970 selfA._toStore("CurrentState") or \
971 selfA._toStore("CurrentOptimum"):
972 selfA.StoredVariables["CurrentState"].store( _X )
974 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
975 _Innovation = Y - _HX
976 if selfA._toStore("SimulatedObservationAtCurrentState") or \
977 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
978 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
979 if selfA._toStore("InnovationAtCurrentState"):
980 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
982 Jb = float( 0.5 * _V.T * BT * _V )
983 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
986 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
987 selfA.StoredVariables["CostFunctionJb"].store( Jb )
988 selfA.StoredVariables["CostFunctionJo"].store( Jo )
989 selfA.StoredVariables["CostFunctionJ" ].store( J )
990 if selfA._toStore("IndexOfOptimum") or \
991 selfA._toStore("CurrentOptimum") or \
992 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
993 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
994 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
995 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
996 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
997 if selfA._toStore("IndexOfOptimum"):
998 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
999 if selfA._toStore("CurrentOptimum"):
1000 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1001 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1002 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1003 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1004 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1005 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1006 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1007 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1008 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1011 def GradientOfCostFunction(v):
1012 _V = numpy.asmatrix(numpy.ravel( v )).T
1015 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1017 GradJo = - Ha( (_X, RI * (Y - _HX)) )
1018 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1021 # Minimization of the functional
1022 # --------------------------------
1023 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1025 if selfA._parameters["Minimizer"] == "LBFGSB":
1026 if "0.19" <= scipy.version.version <= "1.1.0":
1027 import lbfgsbhlt as optimiseur
1029 import scipy.optimize as optimiseur
1030 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1031 func = CostFunction,
1033 fprime = GradientOfCostFunction,
1035 bounds = selfA._parameters["Bounds"],
1036 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1037 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1038 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1039 iprint = selfA._parameters["optiprint"],
1041 nfeval = Informations['funcalls']
1042 rc = Informations['warnflag']
1043 elif selfA._parameters["Minimizer"] == "TNC":
1044 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1045 func = CostFunction,
1047 fprime = GradientOfCostFunction,
1049 bounds = selfA._parameters["Bounds"],
1050 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1051 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1052 ftol = selfA._parameters["CostDecrementTolerance"],
1053 messages = selfA._parameters["optmessages"],
1055 elif selfA._parameters["Minimizer"] == "CG":
1056 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1059 fprime = GradientOfCostFunction,
1061 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1062 gtol = selfA._parameters["GradientNormTolerance"],
1063 disp = selfA._parameters["optdisp"],
1066 elif selfA._parameters["Minimizer"] == "NCG":
1067 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1070 fprime = GradientOfCostFunction,
1072 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1073 avextol = selfA._parameters["CostDecrementTolerance"],
1074 disp = selfA._parameters["optdisp"],
1077 elif selfA._parameters["Minimizer"] == "BFGS":
1078 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1081 fprime = GradientOfCostFunction,
1083 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1084 gtol = selfA._parameters["GradientNormTolerance"],
1085 disp = selfA._parameters["optdisp"],
1089 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1091 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1092 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1094 # Workaround for a TNC bug on the returned Minimum
1095 # ----------------------------------------------------------------
1096 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1097 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1098 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1100 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
1102 # Retrieval of the analysis
1103 # ----------------------
1106 selfA.StoredVariables["Analysis"].store( Xa )
1108 if selfA._toStore("OMA") or \
1109 selfA._toStore("SigmaObs2") or \
1110 selfA._toStore("SimulationQuantiles") or \
1111 selfA._toStore("SimulatedObservationAtOptimum"):
1112 if selfA._toStore("SimulatedObservationAtCurrentState"):
1113 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1114 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1115 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1119 # Computation of the analysis covariance
1120 # ---------------------------------
1121 if selfA._toStore("APosterioriCovariance") or \
1122 selfA._toStore("SimulationQuantiles") or \
1123 selfA._toStore("JacobianMatrixAtOptimum") or \
1124 selfA._toStore("KalmanGainAtOptimum"):
1125 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1126 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1127 if selfA._toStore("APosterioriCovariance") or \
1128 selfA._toStore("SimulationQuantiles") or \
1129 selfA._toStore("KalmanGainAtOptimum"):
1130 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1131 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1132 if selfA._toStore("APosterioriCovariance") or \
1133 selfA._toStore("SimulationQuantiles"):
1138 _ee = numpy.matrix(numpy.zeros(nb)).T
1140 _HtEE = numpy.dot(HtM,_ee)
1141 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1142 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1143 HessienneI = numpy.matrix( HessienneI )
1145 if min(A.shape) != max(A.shape):
1146 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1147 if (numpy.diag(A) < 0).any():
1148 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1149 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1151 L = numpy.linalg.cholesky( A )
1153 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1154 if selfA._toStore("APosterioriCovariance"):
1155 selfA.StoredVariables["APosterioriCovariance"].store( A )
1156 if selfA._toStore("JacobianMatrixAtOptimum"):
1157 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1158 if selfA._toStore("KalmanGainAtOptimum"):
1159 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1160 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1161 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1163 # Additional computations and/or storage
1164 # ---------------------------------------
1165 if selfA._toStore("Innovation") or \
1166 selfA._toStore("SigmaObs2") or \
1167 selfA._toStore("MahalanobisConsistency") or \
1168 selfA._toStore("OMB"):
1170 if selfA._toStore("Innovation"):
1171 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1172 if selfA._toStore("BMA"):
1173 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1174 if selfA._toStore("OMA"):
1175 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1176 if selfA._toStore("OMB"):
1177 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1178 if selfA._toStore("SigmaObs2"):
1179 TraceR = R.trace(Y.size)
1180 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1181 if selfA._toStore("MahalanobisConsistency"):
1182 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1183 if selfA._toStore("SimulationQuantiles"):
1184 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1185 HXa = numpy.matrix(numpy.ravel( HXa )).T
1187 for i in range(nech):
1188 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1189 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1190 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1192 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1193 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1194 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1198 YfQ = numpy.hstack((YfQ,Yr))
1201 for quantile in selfA._parameters["Quantiles"]:
1202 if not (0. <= float(quantile) <= 1.): continue
1203 indice = int(nech * float(quantile) - 1./nech)
1204 if YQ is None: YQ = YfQ[:,indice]
1205 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1206 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1207 if selfA._toStore("SimulatedObservationAtBackground"):
1208 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1209 if selfA._toStore("SimulatedObservationAtOptimum"):
1210 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
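# Hedged explanatory sketch: van3dvar above works in the preconditioned variable v with
# x = Xb + B v, so the background term needs no inverse of B, since
#   (x-Xb)^T B^{-1} (x-Xb) = (B v)^T B^{-1} (B v) = v^T B v   for symmetric B.
# A standalone evaluation of that cost for a linear observation matrix H:
def _sketchPreconditioned3DVarCost(v, Xb, Y, H, B, RI):
    v, Xb, Y = (numpy.ravel(w) for w in (v, Xb, Y))
    x = Xb + B @ v
    innovation = Y - H @ x
    return float( 0.5 * v @ B @ v + 0.5 * innovation @ RI @ innovation )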
1214 # ==============================================================================
1215 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1217 Incremental 3DVAR (Courtier 1994, 1997)
1219 selfA is identical to the "self" of the calling algorithm and contains the
1223 # Workaround for a TNC bug on the returned Minimum
1224 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1225 selfA.setParameterValue("StoreInternalVariables",True)
1230 # Non-linear operator for the outer loop
1231 Hm = HO["Direct"].appliedTo
1233 # Precomputation of the inverses of B and R
1237 # Starting point of the optimization
1238 if selfA._parameters["InitializationPoint"] is not None:
1239 Xini = numpy.ravel(selfA._parameters["InitializationPoint"])
1240 if Xini.size != numpy.ravel(Xb).size:
1241 raise ValueError("Incompatible size %i of forced initial point to replace the Xb of size %i" \
1242 %(Xini.size,numpy.ravel(Xb).size))
1244 Xini = numpy.ravel(Xb)
1246 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1247 Innovation = Y - HXb
1255 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1259 Ht = HO["Tangent"].asMatrix(Xr)
1260 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
1262 # Definition of the cost function
1263 # ------------------------------
1264 def CostFunction(dx):
1265 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1266 if selfA._parameters["StoreInternalVariables"] or \
1267 selfA._toStore("CurrentState") or \
1268 selfA._toStore("CurrentOptimum"):
1269 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1271 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1272 _dInnovation = Innovation - _HdX
1273 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1274 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1275 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1276 if selfA._toStore("InnovationAtCurrentState"):
1277 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1279 Jb = float( 0.5 * _dX.T * BI * _dX )
1280 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1283 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1284 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1285 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1286 selfA.StoredVariables["CostFunctionJ" ].store( J )
1287 if selfA._toStore("IndexOfOptimum") or \
1288 selfA._toStore("CurrentOptimum") or \
1289 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1290 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1291 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1292 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1293 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1294 if selfA._toStore("IndexOfOptimum"):
1295 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1296 if selfA._toStore("CurrentOptimum"):
1297 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1298 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1299 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1300 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1301 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1302 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1303 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1304 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1305 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1308 def GradientOfCostFunction(dx):
1309 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1311 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1312 _dInnovation = Innovation - _HdX
1314 GradJo = - Ht.T @ (RI * _dInnovation)
1315 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1318 # Minimization of the functional
1319 # --------------------------------
1320 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1322 if selfA._parameters["Minimizer"] == "LBFGSB":
1323 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1324 if "0.19" <= scipy.version.version <= "1.1.0":
1325 import lbfgsbhlt as optimiseur
1327 import scipy.optimize as optimiseur
1328 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1329 func = CostFunction,
1330 x0 = numpy.zeros(Xini.size),
1331 fprime = GradientOfCostFunction,
1333 bounds = selfA._parameters["Bounds"],
1334 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1335 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1336 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1337 iprint = selfA._parameters["optiprint"],
1339 nfeval = Informations['funcalls']
1340 rc = Informations['warnflag']
1341 elif selfA._parameters["Minimizer"] == "TNC":
1342 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1343 func = CostFunction,
1344 x0 = numpy.zeros(Xini.size),
1345 fprime = GradientOfCostFunction,
1347 bounds = selfA._parameters["Bounds"],
1348 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1349 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1350 ftol = selfA._parameters["CostDecrementTolerance"],
1351 messages = selfA._parameters["optmessages"],
1353 elif selfA._parameters["Minimizer"] == "CG":
1354 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1356 x0 = numpy.zeros(Xini.size),
1357 fprime = GradientOfCostFunction,
1359 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1360 gtol = selfA._parameters["GradientNormTolerance"],
1361 disp = selfA._parameters["optdisp"],
1364 elif selfA._parameters["Minimizer"] == "NCG":
1365 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1367 x0 = numpy.zeros(Xini.size),
1368 fprime = GradientOfCostFunction,
1370 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1371 avextol = selfA._parameters["CostDecrementTolerance"],
1372 disp = selfA._parameters["optdisp"],
1375 elif selfA._parameters["Minimizer"] == "BFGS":
1376 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1378 x0 = numpy.zeros(Xini.size),
1379 fprime = GradientOfCostFunction,
1381 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1382 gtol = selfA._parameters["GradientNormTolerance"],
1383 disp = selfA._parameters["optdisp"],
1387 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1389 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1390 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1392 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1393 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1394 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1396 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1399 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1400 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1402 # Retrieval of the analysis
1403 # ----------------------
1406 selfA.StoredVariables["Analysis"].store( Xa )
1408 if selfA._toStore("OMA") or \
1409 selfA._toStore("SigmaObs2") or \
1410 selfA._toStore("SimulationQuantiles") or \
1411 selfA._toStore("SimulatedObservationAtOptimum"):
1412 if selfA._toStore("SimulatedObservationAtCurrentState"):
1413 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1414 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1415 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1419 # Computation of the analysis covariance
1420 # ---------------------------------
1421 if selfA._toStore("APosterioriCovariance") or \
1422 selfA._toStore("SimulationQuantiles") or \
1423 selfA._toStore("JacobianMatrixAtOptimum") or \
1424 selfA._toStore("KalmanGainAtOptimum"):
1425 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1426 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1427 if selfA._toStore("APosterioriCovariance") or \
1428 selfA._toStore("SimulationQuantiles") or \
1429 selfA._toStore("KalmanGainAtOptimum"):
1430 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1431 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1432 if selfA._toStore("APosterioriCovariance") or \
1433 selfA._toStore("SimulationQuantiles"):
1437 _ee = numpy.matrix(numpy.zeros(nb)).T
1439 _HtEE = numpy.dot(HtM,_ee)
1440 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1441 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1442 HessienneI = numpy.matrix( HessienneI )
1444 if min(A.shape) != max(A.shape):
1445 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1446 if (numpy.diag(A) < 0).any():
1447 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1448 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1450 L = numpy.linalg.cholesky( A )
1452 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1453 if selfA._toStore("APosterioriCovariance"):
1454 selfA.StoredVariables["APosterioriCovariance"].store( A )
1455 if selfA._toStore("JacobianMatrixAtOptimum"):
1456 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1457 if selfA._toStore("KalmanGainAtOptimum"):
1458 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1459 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1460 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1462 # Additional computations and/or storage
1463 # ---------------------------------------
1464 if selfA._toStore("Innovation") or \
1465 selfA._toStore("SigmaObs2") or \
1466 selfA._toStore("MahalanobisConsistency") or \
1467 selfA._toStore("OMB"):
1469 if selfA._toStore("Innovation"):
1470 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1471 if selfA._toStore("BMA"):
1472 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1473 if selfA._toStore("OMA"):
1474 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1475 if selfA._toStore("OMB"):
1476 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1477 if selfA._toStore("SigmaObs2"):
1478 TraceR = R.trace(Y.size)
1479 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1480 if selfA._toStore("MahalanobisConsistency"):
1481 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1482 if selfA._toStore("SimulationQuantiles"):
1483 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1484 HXa = numpy.matrix(numpy.ravel( HXa )).T
1486 for i in range(nech):
1487 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1488 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1489 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1491 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1492 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1493 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1497 YfQ = numpy.hstack((YfQ,Yr))
1500 for quantile in selfA._parameters["Quantiles"]:
1501 if not (0. <= float(quantile) <= 1.): continue
1502 indice = int(nech * float(quantile) - 1./nech)
1503 if YQ is None: YQ = YfQ[:,indice]
1504 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1505 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1506 if selfA._toStore("SimulatedObservationAtBackground"):
1507 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1508 if selfA._toStore("SimulatedObservationAtOptimum"):
1509 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1513 # ==============================================================================
1514 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1516 3DVAR PSAS (Huang 2000)
1518 selfA is identical to the "self" of the calling algorithm and contains the
1522 # Workaround for a TNC bug affecting the returned Minimum
1523 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1524 selfA.setParameterValue("StoreInternalVariables",True)
1530 Hm = HO["Direct"].appliedTo
1532 # Optional use of a precomputed H(Xb) vector
1533 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1534 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1537 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1538 if Y.size != HXb.size:
1539 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1540 if max(Y.shape) != max(HXb.shape):
1541 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1543 if selfA._toStore("JacobianMatrixAtBackground"):
1544 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1545 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1546 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1548 Ht = HO["Tangent"].asMatrix(Xb)
1550 HBHTpR = R + Ht * BHT
1551 Innovation = Y - HXb
1553 # Starting point of the optimization
1554 Xini = numpy.zeros(Xb.shape)
1556 # Definition of the cost function
1557 # ------------------------------
1558 def CostFunction(w):
1559 _W = numpy.asmatrix(numpy.ravel( w )).T
1560 if selfA._parameters["StoreInternalVariables"] or \
1561 selfA._toStore("CurrentState") or \
1562 selfA._toStore("CurrentOptimum"):
1563 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1564 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1565 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1566 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1567 if selfA._toStore("InnovationAtCurrentState"):
1568 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1570 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1571 Jo = float( - _W.T * Innovation )
1574 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1575 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1576 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1577 selfA.StoredVariables["CostFunctionJ" ].store( J )
1578 if selfA._toStore("IndexOfOptimum") or \
1579 selfA._toStore("CurrentOptimum") or \
1580 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1581 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1582 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1583 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1584 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1585 if selfA._toStore("IndexOfOptimum"):
1586 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1587 if selfA._toStore("CurrentOptimum"):
1588 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1589 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1591 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1592 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1593 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1594 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1595 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1596 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1599 def GradientOfCostFunction(w):
1600 _W = numpy.asmatrix(numpy.ravel( w )).T
1601 GradJb = HBHTpR * _W
1602 GradJo = - Innovation
1603 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1606 # Minimization of the functional
1607 # --------------------------------
1608 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1610 if selfA._parameters["Minimizer"] == "LBFGSB":
1611 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1612 if "0.19" <= scipy.version.version <= "1.1.0":
1613 import lbfgsbhlt as optimiseur
1615 import scipy.optimize as optimiseur
1616 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1617 func = CostFunction,
1619 fprime = GradientOfCostFunction,
1621 bounds = selfA._parameters["Bounds"],
1622 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1623 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1624 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1625 iprint = selfA._parameters["optiprint"],
1627 nfeval = Informations['funcalls']
1628 rc = Informations['warnflag']
1629 elif selfA._parameters["Minimizer"] == "TNC":
1630 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1631 func = CostFunction,
1633 fprime = GradientOfCostFunction,
1635 bounds = selfA._parameters["Bounds"],
1636 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1637 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1638 ftol = selfA._parameters["CostDecrementTolerance"],
1639 messages = selfA._parameters["optmessages"],
1641 elif selfA._parameters["Minimizer"] == "CG":
1642 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1645 fprime = GradientOfCostFunction,
1647 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1648 gtol = selfA._parameters["GradientNormTolerance"],
1649 disp = selfA._parameters["optdisp"],
1652 elif selfA._parameters["Minimizer"] == "NCG":
1653 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1656 fprime = GradientOfCostFunction,
1658 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1659 avextol = selfA._parameters["CostDecrementTolerance"],
1660 disp = selfA._parameters["optdisp"],
1663 elif selfA._parameters["Minimizer"] == "BFGS":
1664 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1667 fprime = GradientOfCostFunction,
1669 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1670 gtol = selfA._parameters["GradientNormTolerance"],
1671 disp = selfA._parameters["optdisp"],
1675 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1677 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1678 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1680 # Workaround for a TNC bug affecting the returned Minimum
1681 # ----------------------------------------------------------------
1682 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1683 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1684 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1686 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1688 # Obtaining the analysis
1689 # ----------------------
1692 selfA.StoredVariables["Analysis"].store( Xa )
1694 if selfA._toStore("OMA") or \
1695 selfA._toStore("SigmaObs2") or \
1696 selfA._toStore("SimulationQuantiles") or \
1697 selfA._toStore("SimulatedObservationAtOptimum"):
1698 if selfA._toStore("SimulatedObservationAtCurrentState"):
1699 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1700 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1701 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1705 # Computation of the analysis covariance
1706 # ---------------------------------
1707 if selfA._toStore("APosterioriCovariance") or \
1708 selfA._toStore("SimulationQuantiles") or \
1709 selfA._toStore("JacobianMatrixAtOptimum") or \
1710 selfA._toStore("KalmanGainAtOptimum"):
1711 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1712 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1713 if selfA._toStore("APosterioriCovariance") or \
1714 selfA._toStore("SimulationQuantiles") or \
1715 selfA._toStore("KalmanGainAtOptimum"):
1716 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1717 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1718 if selfA._toStore("APosterioriCovariance") or \
1719 selfA._toStore("SimulationQuantiles"):
1725 _ee = numpy.matrix(numpy.zeros(nb)).T
1727 _HtEE = numpy.dot(HtM,_ee)
1728 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1729 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1730 HessienneI = numpy.matrix( HessienneI )
1732 if min(A.shape) != max(A.shape):
1733 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1734 if (numpy.diag(A) < 0).any():
1735 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1736 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1738 L = numpy.linalg.cholesky( A )
1740 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1741 if selfA._toStore("APosterioriCovariance"):
1742 selfA.StoredVariables["APosterioriCovariance"].store( A )
1743 if selfA._toStore("JacobianMatrixAtOptimum"):
1744 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1745 if selfA._toStore("KalmanGainAtOptimum"):
1746 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1747 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1748 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1750 # Additional computations and/or storage
1751 # ---------------------------------------
1752 if selfA._toStore("Innovation") or \
1753 selfA._toStore("SigmaObs2") or \
1754 selfA._toStore("MahalanobisConsistency") or \
1755 selfA._toStore("OMB"):
1757 if selfA._toStore("Innovation"):
1758 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1759 if selfA._toStore("BMA"):
1760 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1761 if selfA._toStore("OMA"):
1762 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1763 if selfA._toStore("OMB"):
1764 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1765 if selfA._toStore("SigmaObs2"):
1766 TraceR = R.trace(Y.size)
1767 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1768 if selfA._toStore("MahalanobisConsistency"):
1769 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1770 if selfA._toStore("SimulationQuantiles"):
1771 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1772 HXa = numpy.matrix(numpy.ravel( HXa )).T
1774 for i in range(nech):
1775 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1776 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1777 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1779 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1780 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1781 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1785 YfQ = numpy.hstack((YfQ,Yr))
1788 for quantile in selfA._parameters["Quantiles"]:
1789 if not (0. <= float(quantile) <= 1.): continue
1790 indice = int(nech * float(quantile) - 1./nech)
1791 if YQ is None: YQ = YfQ[:,indice]
1792 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1793 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1794 if selfA._toStore("SimulatedObservationAtBackground"):
1795 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1796 if selfA._toStore("SimulatedObservationAtOptimum"):
1797 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
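# The following helper is not part of ADAO and is never called; it is only a
# minimal sketch, assuming small dense numpy arrays and a linear observation
# operator H, of the PSAS construction used above: the dual variable w solves
# (H B Ht + R) w = y - H xb in observation space, and the analysis is recovered
# as xa = xb + B Ht w. All names are illustrative.
def _demo_psas_analysis(xb, y, H, B, R):
    "xb: (n,) background; y: (p,) observation; H: (p,n); B: (n,n); R: (p,p)."
    import numpy
    d = y - H @ xb                                  # innovation in observation space
    w = numpy.linalg.solve(H @ B @ H.T + R, d)      # dual (observation-space) solution
    return xb + B @ H.T @ w                         # analysis back in state space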
1801 # ==============================================================================
1802 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1804 Stochastic EnKF (Evensen 1994, Burgers 1998)
1806 selfA is identical to the "self" of the calling algorithm and contains the
1809 if selfA._parameters["EstimationOf"] == "Parameters":
1810 selfA._parameters["StoreInternalVariables"] = True
1814 H = HO["Direct"].appliedControledFormTo
1816 if selfA._parameters["EstimationOf"] == "State":
1817 M = EM["Direct"].appliedControledFormTo
1819 if CM is not None and "Tangent" in CM and U is not None:
1820 Cm = CM["Tangent"].asMatrix(Xb)
1824 # Number of steps identical to the number of observation steps
1825 # -------------------------------------------------------
1826 if hasattr(Y,"stepnumber"):
1827 duration = Y.stepnumber()
1828 __p = numpy.cumprod(Y.shape())[-1]
1831 __p = numpy.array(Y).size
1833 # Precomputation of the inversions of B and R
1834 # ----------------------------------
1835 if selfA._parameters["StoreInternalVariables"] \
1836 or selfA._toStore("CostFunctionJ") \
1837 or selfA._toStore("CostFunctionJb") \
1838 or selfA._toStore("CostFunctionJo") \
1839 or selfA._toStore("CurrentOptimum") \
1840 or selfA._toStore("APosterioriCovariance"):
1847 __m = selfA._parameters["NumberOfMembers"]
1848 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1850 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1852 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1854 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1856 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1857 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1858 if selfA._toStore("APosterioriCovariance"):
1859 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1862 previousJMinimum = numpy.finfo(float).max
1864 for step in range(duration-1):
1865 if hasattr(Y,"store"):
1866 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1868 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1871 if hasattr(U,"store") and len(U)>1:
1872 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1873 elif hasattr(U,"store") and len(U)==1:
1874 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1876 Un = numpy.asmatrix(numpy.ravel( U )).T
1880 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1881 Xn = CovarianceInflation( Xn,
1882 selfA._parameters["InflationType"],
1883 selfA._parameters["InflationFactor"],
1886 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1887 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1889 returnSerieAsArrayMatrix = True )
1890 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1891 Xn_predicted = EMX + qi
1892 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1894 returnSerieAsArrayMatrix = True )
1895 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplication!
1896 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1897 Xn_predicted = Xn_predicted + Cm * Un
1898 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1899 # ---> By principle, M = Id, Q = 0
1901 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1903 returnSerieAsArrayMatrix = True )
1905 # Mean of forecast and observation of forecast
1906 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1907 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1909 #--------------------------
1910 if VariantM == "KalmanFilterFormula05":
1911 PfHT, HPfHT = 0., 0.
1912 for i in range(__m):
1913 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1914 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1915 PfHT += Exfi * Eyfi.T
1916 HPfHT += Eyfi * Eyfi.T
1917 PfHT = (1./(__m-1)) * PfHT
1918 HPfHT = (1./(__m-1)) * HPfHT
1919 Kn = PfHT * ( R + HPfHT ).I
1922 for i in range(__m):
1923 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1924 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1925 #--------------------------
1926 elif VariantM == "KalmanFilterFormula16":
1927 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1928 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1930 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1931 EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1933 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
1935 for i in range(__m):
1936 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1937 #--------------------------
1939 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1941 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1942 Xn = CovarianceInflation( Xn,
1943 selfA._parameters["InflationType"],
1944 selfA._parameters["InflationFactor"],
1947 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1948 #--------------------------
1950 if selfA._parameters["StoreInternalVariables"] \
1951 or selfA._toStore("CostFunctionJ") \
1952 or selfA._toStore("CostFunctionJb") \
1953 or selfA._toStore("CostFunctionJo") \
1954 or selfA._toStore("APosterioriCovariance") \
1955 or selfA._toStore("InnovationAtCurrentAnalysis") \
1956 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1957 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1958 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1959 _Innovation = Ynpu - _HXa
1961 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1962 # ---> with the analysis
1963 selfA.StoredVariables["Analysis"].store( Xa )
1964 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1965 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1966 if selfA._toStore("InnovationAtCurrentAnalysis"):
1967 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1968 # ---> with the current state
1969 if selfA._parameters["StoreInternalVariables"] \
1970 or selfA._toStore("CurrentState"):
1971 selfA.StoredVariables["CurrentState"].store( Xn )
1972 if selfA._toStore("ForecastState"):
1973 selfA.StoredVariables["ForecastState"].store( EMX )
1974 if selfA._toStore("BMA"):
1975 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1976 if selfA._toStore("InnovationAtCurrentState"):
1977 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1978 if selfA._toStore("SimulatedObservationAtCurrentState") \
1979 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1980 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1982 if selfA._parameters["StoreInternalVariables"] \
1983 or selfA._toStore("CostFunctionJ") \
1984 or selfA._toStore("CostFunctionJb") \
1985 or selfA._toStore("CostFunctionJo") \
1986 or selfA._toStore("CurrentOptimum") \
1987 or selfA._toStore("APosterioriCovariance"):
1988 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1989 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1991 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1992 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1993 selfA.StoredVariables["CostFunctionJ" ].store( J )
1995 if selfA._toStore("IndexOfOptimum") \
1996 or selfA._toStore("CurrentOptimum") \
1997 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1998 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1999 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2000 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2001 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2002 if selfA._toStore("IndexOfOptimum"):
2003 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2004 if selfA._toStore("CurrentOptimum"):
2005 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2006 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2007 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2008 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2009 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2010 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2011 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2012 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2013 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2014 if selfA._toStore("APosterioriCovariance"):
2015 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2017 Pn = 0.5 * (Pn + Pn.T)
2018 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2019 if selfA._parameters["EstimationOf"] == "Parameters" \
2020 and J < previousJMinimum:
2021 previousJMinimum = J
2023 if selfA._toStore("APosterioriCovariance"):
2024 covarianceXaMin = Pn
2026 # Additional final storage of the optimum in parameter estimation
2027 # ----------------------------------------------------------------------
2028 if selfA._parameters["EstimationOf"] == "Parameters":
2029 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2030 selfA.StoredVariables["Analysis"].store( XaMin )
2031 if selfA._toStore("APosterioriCovariance"):
2032 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2033 if selfA._toStore("BMA"):
2034 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2038 # ==============================================================================
2039 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2041 Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2043 selfA is identical to the "self" of the calling algorithm and contains the
2046 if selfA._parameters["EstimationOf"] == "Parameters":
2047 selfA._parameters["StoreInternalVariables"] = True
2051 H = HO["Direct"].appliedControledFormTo
2053 if selfA._parameters["EstimationOf"] == "State":
2054 M = EM["Direct"].appliedControledFormTo
2056 if CM is not None and "Tangent" in CM and U is not None:
2057 Cm = CM["Tangent"].asMatrix(Xb)
2061 # Number of steps identical to the number of observation steps
2062 # -------------------------------------------------------
2063 if hasattr(Y,"stepnumber"):
2064 duration = Y.stepnumber()
2065 __p = numpy.cumprod(Y.shape())[-1]
2068 __p = numpy.array(Y).size
2070 # Precomputation of the inversions of B and R
2071 # ----------------------------------
2072 if selfA._parameters["StoreInternalVariables"] \
2073 or selfA._toStore("CostFunctionJ") \
2074 or selfA._toStore("CostFunctionJb") \
2075 or selfA._toStore("CostFunctionJo") \
2076 or selfA._toStore("CurrentOptimum") \
2077 or selfA._toStore("APosterioriCovariance"):
2080 elif VariantM != "KalmanFilterFormula":
2082 if VariantM == "KalmanFilterFormula":
2083 RIdemi = R.choleskyI()
2088 __m = selfA._parameters["NumberOfMembers"]
2089 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2091 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2093 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2095 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2097 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2098 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2099 if selfA._toStore("APosterioriCovariance"):
2100 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2103 previousJMinimum = numpy.finfo(float).max
2105 for step in range(duration-1):
2106 if hasattr(Y,"store"):
2107 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2109 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2112 if hasattr(U,"store") and len(U)>1:
2113 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2114 elif hasattr(U,"store") and len(U)==1:
2115 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2117 Un = numpy.asmatrix(numpy.ravel( U )).T
2121 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2122 Xn = CovarianceInflation( Xn,
2123 selfA._parameters["InflationType"],
2124 selfA._parameters["InflationFactor"],
2127 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2128 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2130 returnSerieAsArrayMatrix = True )
2131 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2132 Xn_predicted = EMX + qi
2133 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2135 returnSerieAsArrayMatrix = True )
2136 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplication!
2137 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2138 Xn_predicted = Xn_predicted + Cm * Un
2139 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2140 # ---> By principle, M = Id, Q = 0
2142 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2144 returnSerieAsArrayMatrix = True )
2146 # Mean of forecast and observation of forecast
2147 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2148 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2151 EaX = EnsembleOfAnomalies( Xn_predicted )
2152 EaHX = numpy.array(HX_predicted - Hfm)
2154 #--------------------------
2155 if VariantM == "KalmanFilterFormula":
2156 mS = RIdemi * EaHX / numpy.sqrt(__m-1)
2157 delta = RIdemi * ( Ynpu - Hfm )
2158 mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2159 vw = mT @ mS.transpose() @ delta
2161 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2164 EaX = EaX / numpy.sqrt(__m-1)
2165 Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2166 #--------------------------
2167 elif VariantM == "Variational":
2168 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2169 def CostFunction(w):
2170 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2171 _Jo = 0.5 * _A.T @ (RI * _A)
2172 _Jb = 0.5 * (__m-1) * w.T @ w
2175 def GradientOfCostFunction(w):
2176 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2177 _GradJo = - EaHX.T @ (RI * _A)
2178 _GradJb = (__m-1) * w.reshape((__m,1))
2179 _GradJ = _GradJo + _GradJb
2180 return numpy.ravel(_GradJ)
2181 vw = scipy.optimize.fmin_cg(
2183 x0 = numpy.zeros(__m),
2184 fprime = GradientOfCostFunction,
2189 Hto = EaHX.T @ (RI * EaHX)
2190 Htb = (__m-1) * numpy.eye(__m)
2193 Pta = numpy.linalg.inv( Hta )
2194 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2196 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2197 #--------------------------
2198 elif VariantM == "FiniteSize11": # Jauge Boc2011
2199 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2200 def CostFunction(w):
2201 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2202 _Jo = 0.5 * _A.T @ (RI * _A)
2203 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2206 def GradientOfCostFunction(w):
2207 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2208 _GradJo = - EaHX.T @ (RI * _A)
2209 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2210 _GradJ = _GradJo + _GradJb
2211 return numpy.ravel(_GradJ)
2212 vw = scipy.optimize.fmin_cg(
2214 x0 = numpy.zeros(__m),
2215 fprime = GradientOfCostFunction,
2220 Hto = EaHX.T @ (RI * EaHX)
2222 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2223 / (1 + 1/__m + vw.T @ vw)**2
2226 Pta = numpy.linalg.inv( Hta )
2227 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2229 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2230 #--------------------------
2231 elif VariantM == "FiniteSize15": # Jauge Boc2015
2232 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2233 def CostFunction(w):
2234 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2235 _Jo = 0.5 * _A.T @ (RI * _A)
2236 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2239 def GradientOfCostFunction(w):
2240 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2241 _GradJo = - EaHX.T @ (RI * _A)
2242 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2243 _GradJ = _GradJo + _GradJb
2244 return numpy.ravel(_GradJ)
2245 vw = scipy.optimize.fmin_cg(
2247 x0 = numpy.zeros(__m),
2248 fprime = GradientOfCostFunction,
2253 Hto = EaHX.T @ (RI * EaHX)
2255 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2256 / (1 + 1/__m + vw.T @ vw)**2
2259 Pta = numpy.linalg.inv( Hta )
2260 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2262 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2263 #--------------------------
2264 elif VariantM == "FiniteSize16": # Jauge Boc2016
2265 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2266 def CostFunction(w):
2267 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2268 _Jo = 0.5 * _A.T @ (RI * _A)
2269 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2272 def GradientOfCostFunction(w):
2273 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2274 _GradJo = - EaHX.T @ (RI * _A)
2275 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2276 _GradJ = _GradJo + _GradJb
2277 return numpy.ravel(_GradJ)
2278 vw = scipy.optimize.fmin_cg(
2280 x0 = numpy.zeros(__m),
2281 fprime = GradientOfCostFunction,
2286 Hto = EaHX.T @ (RI * EaHX)
2287 Htb = ((__m+1) / (__m-1)) * \
2288 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2289 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2292 Pta = numpy.linalg.inv( Hta )
2293 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2295 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2296 #--------------------------
2298 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2300 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2301 Xn = CovarianceInflation( Xn,
2302 selfA._parameters["InflationType"],
2303 selfA._parameters["InflationFactor"],
2306 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2307 #--------------------------
2309 if selfA._parameters["StoreInternalVariables"] \
2310 or selfA._toStore("CostFunctionJ") \
2311 or selfA._toStore("CostFunctionJb") \
2312 or selfA._toStore("CostFunctionJo") \
2313 or selfA._toStore("APosterioriCovariance") \
2314 or selfA._toStore("InnovationAtCurrentAnalysis") \
2315 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2316 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2317 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2318 _Innovation = Ynpu - _HXa
2320 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2321 # ---> with the analysis
2322 selfA.StoredVariables["Analysis"].store( Xa )
2323 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2324 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2325 if selfA._toStore("InnovationAtCurrentAnalysis"):
2326 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2327 # ---> with the current state
2328 if selfA._parameters["StoreInternalVariables"] \
2329 or selfA._toStore("CurrentState"):
2330 selfA.StoredVariables["CurrentState"].store( Xn )
2331 if selfA._toStore("ForecastState"):
2332 selfA.StoredVariables["ForecastState"].store( EMX )
2333 if selfA._toStore("BMA"):
2334 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2335 if selfA._toStore("InnovationAtCurrentState"):
2336 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2337 if selfA._toStore("SimulatedObservationAtCurrentState") \
2338 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2339 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2341 if selfA._parameters["StoreInternalVariables"] \
2342 or selfA._toStore("CostFunctionJ") \
2343 or selfA._toStore("CostFunctionJb") \
2344 or selfA._toStore("CostFunctionJo") \
2345 or selfA._toStore("CurrentOptimum") \
2346 or selfA._toStore("APosterioriCovariance"):
2347 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2348 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2350 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2351 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2352 selfA.StoredVariables["CostFunctionJ" ].store( J )
2354 if selfA._toStore("IndexOfOptimum") \
2355 or selfA._toStore("CurrentOptimum") \
2356 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2357 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2358 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2359 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2360 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2361 if selfA._toStore("IndexOfOptimum"):
2362 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2363 if selfA._toStore("CurrentOptimum"):
2364 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2365 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2366 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2367 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2368 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2369 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2370 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2371 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2372 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2373 if selfA._toStore("APosterioriCovariance"):
2374 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2376 Pn = 0.5 * (Pn + Pn.T)
2377 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2378 if selfA._parameters["EstimationOf"] == "Parameters" \
2379 and J < previousJMinimum:
2380 previousJMinimum = J
2382 if selfA._toStore("APosterioriCovariance"):
2383 covarianceXaMin = Pn
2385 # Additional final storage of the optimum in parameter estimation
2386 # ----------------------------------------------------------------------
2387 if selfA._parameters["EstimationOf"] == "Parameters":
2388 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2389 selfA.StoredVariables["Analysis"].store( XaMin )
2390 if selfA._toStore("APosterioriCovariance"):
2391 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2392 if selfA._toStore("BMA"):
2393 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2397 # ==============================================================================
2398 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2399 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2401 Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2403 selfA is identical to the "self" of the calling algorithm and contains the
2406 if selfA._parameters["EstimationOf"] == "Parameters":
2407 selfA._parameters["StoreInternalVariables"] = True
2411 H = HO["Direct"].appliedControledFormTo
2413 if selfA._parameters["EstimationOf"] == "State":
2414 M = EM["Direct"].appliedControledFormTo
2416 if CM is not None and "Tangent" in CM and U is not None:
2417 Cm = CM["Tangent"].asMatrix(Xb)
2421 # Number of steps identical to the number of observation steps
2422 # -------------------------------------------------------
2423 if hasattr(Y,"stepnumber"):
2424 duration = Y.stepnumber()
2425 __p = numpy.cumprod(Y.shape())[-1]
2428 __p = numpy.array(Y).size
2430 # Precomputation of the inversions of B and R
2431 # ----------------------------------
2432 if selfA._parameters["StoreInternalVariables"] \
2433 or selfA._toStore("CostFunctionJ") \
2434 or selfA._toStore("CostFunctionJb") \
2435 or selfA._toStore("CostFunctionJo") \
2436 or selfA._toStore("CurrentOptimum") \
2437 or selfA._toStore("APosterioriCovariance"):
2444 __m = selfA._parameters["NumberOfMembers"]
2445 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2447 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2449 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2451 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2453 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2454 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2455 if selfA._toStore("APosterioriCovariance"):
2456 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2459 previousJMinimum = numpy.finfo(float).max
2461 for step in range(duration-1):
2462 if hasattr(Y,"store"):
2463 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2465 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2468 if hasattr(U,"store") and len(U)>1:
2469 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2470 elif hasattr(U,"store") and len(U)==1:
2471 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2473 Un = numpy.asmatrix(numpy.ravel( U )).T
2477 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2478 Xn = CovarianceInflation( Xn,
2479 selfA._parameters["InflationType"],
2480 selfA._parameters["InflationFactor"],
2483 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2484 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2486 returnSerieAsArrayMatrix = True )
2487 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2488 Xn_predicted = EMX + qi
2489 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplication!
2490 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2491 Xn_predicted = Xn_predicted + Cm * Un
2492 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2493 # ---> By principle, M = Id, Q = 0
2496 #--------------------------
2497 if VariantM == "MLEF13":
2498 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2499 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2505 vw = numpy.zeros(__m)
2506 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2507 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2510 E1 = vx1 + _epsilon * EaX
2512 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2514 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2516 returnSerieAsArrayMatrix = True )
2517 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2520 EaY = (HE2 - vy2) / _epsilon
2522 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2524 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2525 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2526 Deltaw = - numpy.linalg.solve(mH,GradJ)
2531 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2536 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2538 Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2539 #--------------------------
2541 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2543 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2544 Xn = CovarianceInflation( Xn,
2545 selfA._parameters["InflationType"],
2546 selfA._parameters["InflationFactor"],
2549 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2550 #--------------------------
2552 if selfA._parameters["StoreInternalVariables"] \
2553 or selfA._toStore("CostFunctionJ") \
2554 or selfA._toStore("CostFunctionJb") \
2555 or selfA._toStore("CostFunctionJo") \
2556 or selfA._toStore("APosterioriCovariance") \
2557 or selfA._toStore("InnovationAtCurrentAnalysis") \
2558 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2559 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2560 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2561 _Innovation = Ynpu - _HXa
2563 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2564 # ---> with the analysis
2565 selfA.StoredVariables["Analysis"].store( Xa )
2566 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2567 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2568 if selfA._toStore("InnovationAtCurrentAnalysis"):
2569 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2570 # ---> with the current state
2571 if selfA._parameters["StoreInternalVariables"] \
2572 or selfA._toStore("CurrentState"):
2573 selfA.StoredVariables["CurrentState"].store( Xn )
2574 if selfA._toStore("ForecastState"):
2575 selfA.StoredVariables["ForecastState"].store( EMX )
2576 if selfA._toStore("BMA"):
2577 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2578 if selfA._toStore("InnovationAtCurrentState"):
2579 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2580 if selfA._toStore("SimulatedObservationAtCurrentState") \
2581 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2582 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2584 if selfA._parameters["StoreInternalVariables"] \
2585 or selfA._toStore("CostFunctionJ") \
2586 or selfA._toStore("CostFunctionJb") \
2587 or selfA._toStore("CostFunctionJo") \
2588 or selfA._toStore("CurrentOptimum") \
2589 or selfA._toStore("APosterioriCovariance"):
2590 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2591 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2593 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2594 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2595 selfA.StoredVariables["CostFunctionJ" ].store( J )
2597 if selfA._toStore("IndexOfOptimum") \
2598 or selfA._toStore("CurrentOptimum") \
2599 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2600 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2601 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2602 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2603 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2604 if selfA._toStore("IndexOfOptimum"):
2605 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2606 if selfA._toStore("CurrentOptimum"):
2607 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2608 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2609 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2610 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2611 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2612 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2613 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2614 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2615 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2616 if selfA._toStore("APosterioriCovariance"):
2617 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2619 Pn = 0.5 * (Pn + Pn.T)
2620 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2621 if selfA._parameters["EstimationOf"] == "Parameters" \
2622 and J < previousJMinimum:
2623 previousJMinimum = J
2625 if selfA._toStore("APosterioriCovariance"):
2626 covarianceXaMin = Pn
2628 # Additional final storage of the optimum in parameter estimation
2629 # ----------------------------------------------------------------------
2630 if selfA._parameters["EstimationOf"] == "Parameters":
2631 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2632 selfA.StoredVariables["Analysis"].store( XaMin )
2633 if selfA._toStore("APosterioriCovariance"):
2634 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2635 if selfA._toStore("BMA"):
2636 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2640 # ==============================================================================
2641 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2642 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2644 Iterative EnKF (Sakov 2012, Sakov 2018)
2646 selfA is identical to the "self" of the calling algorithm and contains the
2649 if selfA._parameters["EstimationOf"] == "Parameters":
2650 selfA._parameters["StoreInternalVariables"] = True
2654 H = HO["Direct"].appliedControledFormTo
2656 if selfA._parameters["EstimationOf"] == "State":
2657 M = EM["Direct"].appliedControledFormTo
2659 if CM is not None and "Tangent" in CM and U is not None:
2660 Cm = CM["Tangent"].asMatrix(Xb)
2664 # Number of steps identical to the number of observation steps
2665 # -------------------------------------------------------
2666 if hasattr(Y,"stepnumber"):
2667 duration = Y.stepnumber()
2668 __p = numpy.cumprod(Y.shape())[-1]
2671 __p = numpy.array(Y).size
2673 # Precomputation of the inversions of B and R
2674 # ----------------------------------
2675 if selfA._parameters["StoreInternalVariables"] \
2676 or selfA._toStore("CostFunctionJ") \
2677 or selfA._toStore("CostFunctionJb") \
2678 or selfA._toStore("CostFunctionJo") \
2679 or selfA._toStore("CurrentOptimum") \
2680 or selfA._toStore("APosterioriCovariance"):
2687 __m = selfA._parameters["NumberOfMembers"]
2688 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2690 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2692 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2694 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2696 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2697 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2698 if selfA._toStore("APosterioriCovariance"):
2699 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2702 previousJMinimum = numpy.finfo(float).max
2704 for step in range(duration-1):
2705 if hasattr(Y,"store"):
2706 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2708 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2711 if hasattr(U,"store") and len(U)>1:
2712 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2713 elif hasattr(U,"store") and len(U)==1:
2714 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2716 Un = numpy.asmatrix(numpy.ravel( U )).T
2720 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2721 Xn = CovarianceInflation( Xn,
2722 selfA._parameters["InflationType"],
2723 selfA._parameters["InflationFactor"],
2726 #--------------------------
2727 if VariantM == "IEnKF12":
2728 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2729 EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2734 vw = numpy.zeros(__m)
2735 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2736 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2739 E1 = vx1 + _epsilon * EaX
2741 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2743 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2744 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2746 returnSerieAsArrayMatrix = True )
2747 elif selfA._parameters["EstimationOf"] == "Parameters":
2748 # ---> By principle, M = Id
2750 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2751 vy1 = H((vx2, Un)).reshape((__p,-1))
2753 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2755 returnSerieAsArrayMatrix = True )
2756 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2759 EaY = (HE2 - vy2) / _epsilon
2761 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2763 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2764 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2765 Deltaw = - numpy.linalg.solve(mH,GradJ)
2770 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2774 A2 = EnsembleOfAnomalies( E2 )
2777 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2778 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2781 #--------------------------
2783 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2785 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2786 Xn = CovarianceInflation( Xn,
2787 selfA._parameters["InflationType"],
2788 selfA._parameters["InflationFactor"],
2791 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2792 #--------------------------
2794 if selfA._parameters["StoreInternalVariables"] \
2795 or selfA._toStore("CostFunctionJ") \
2796 or selfA._toStore("CostFunctionJb") \
2797 or selfA._toStore("CostFunctionJo") \
2798 or selfA._toStore("APosterioriCovariance") \
2799 or selfA._toStore("InnovationAtCurrentAnalysis") \
2800 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2801 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2802 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2803 _Innovation = Ynpu - _HXa
2805 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2806 # ---> with the analysis
2807 selfA.StoredVariables["Analysis"].store( Xa )
2808 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2809 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2810 if selfA._toStore("InnovationAtCurrentAnalysis"):
2811 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2812 # ---> with the current state
2813 if selfA._parameters["StoreInternalVariables"] \
2814 or selfA._toStore("CurrentState"):
2815 selfA.StoredVariables["CurrentState"].store( Xn )
2816 if selfA._toStore("ForecastState"):
2817 selfA.StoredVariables["ForecastState"].store( E2 )
2818 if selfA._toStore("BMA"):
2819 selfA.StoredVariables["BMA"].store( E2 - Xa )
2820 if selfA._toStore("InnovationAtCurrentState"):
2821 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2822 if selfA._toStore("SimulatedObservationAtCurrentState") \
2823 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2824 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2826 if selfA._parameters["StoreInternalVariables"] \
2827 or selfA._toStore("CostFunctionJ") \
2828 or selfA._toStore("CostFunctionJb") \
2829 or selfA._toStore("CostFunctionJo") \
2830 or selfA._toStore("CurrentOptimum") \
2831 or selfA._toStore("APosterioriCovariance"):
2832 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2833 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2835 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2836 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2837 selfA.StoredVariables["CostFunctionJ" ].store( J )
2839 if selfA._toStore("IndexOfOptimum") \
2840 or selfA._toStore("CurrentOptimum") \
2841 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2842 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2843 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2844 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2845 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2846 if selfA._toStore("IndexOfOptimum"):
2847 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2848 if selfA._toStore("CurrentOptimum"):
2849 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2850 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2851 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2852 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2853 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2854 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2855 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2856 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2857 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2858 if selfA._toStore("APosterioriCovariance"):
2859 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2861 Pn = 0.5 * (Pn + Pn.T)
2862 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2863 if selfA._parameters["EstimationOf"] == "Parameters" \
2864 and J < previousJMinimum:
2865 previousJMinimum = J
2867 if selfA._toStore("APosterioriCovariance"):
2868 covarianceXaMin = Pn
2870 # Additional final storage of the optimum in parameter estimation
2871 # ----------------------------------------------------------------------
2872 if selfA._parameters["EstimationOf"] == "Parameters":
2873 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2874 selfA.StoredVariables["Analysis"].store( XaMin )
2875 if selfA._toStore("APosterioriCovariance"):
2876 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2877 if selfA._toStore("BMA"):
2878 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2882 # ==============================================================================
2883 if __name__ == "__main__":
2884 print('\n AUTODIAGNOSTIC\n')