# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( paire ):
    "Import and execute the user function on X (run by multiprocessing workers)"
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX = __fonction( __X )
    return numpy.ravel( __HX )
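#
# Illustrative sketch (editor's note, not part of the library): ExecuteFunction
# is what each multiprocessing worker runs. It re-imports the user function
# from the (path, module, name) description built in FDApproximation below,
# because the function object itself cannot be shipped to a worker. A
# hypothetical call, with made-up path/module/function names, would look like:
#
#   funcrepr = {
#       "__userFunction__path" : "/some/user/dir",
#       "__userFunction__modl" : "usermodule",
#       "__userFunction__name" : "userfunction",
#   }
#   HX = ExecuteFunction( (numpy.ones(3), funcrepr) )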
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    creating an object and providing a function "Function", one obtains an
    object with three methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD (finite difference) approximation is controlled
    by the multiplicative increment "increment" (1% by default), or by the
    fixed increment "dX" which is then multiplied by "increment" (hence in %),
    and centered finite differences are used if the boolean "centeredDF" is
    true. An illustrative usage sketch is given after the class.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the direct calculation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates: %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection: %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        "Search the list l backwards for a duplicate of e, with norm tolerance n"
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Direct calculation using the provided function.
        """
        logging.debug("FDA DirectOperator calculation (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Calculation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, which is a numpy.matrix.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are calculated
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by twice the increment dX[i]
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is calculated
        2/ The central value HX = H(X) is calculated
        3/ The differences (HX_plus_dXi-HX) are computed and divided by the
           increment dX[i]
        4/ Each result, component by component, becomes a column of the Jacobian

        A worked numerical example is given in a comment at the end of this
        method.
        """
        logging.debug("FDA Start of Jacobian calculation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Jacobian calculation (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Jacobian calculation (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of Jacobian calculation")
        #
        return _Jacobienne
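    # Editor's worked example of the centered formula above: for H(x) = x**2
    # at X = 2 with a 1% increment (dX = 0.02), one gets
    #     ( H(2.02) - H(1.98) ) / (2*0.02) = (4.0804 - 3.9204) / 0.04 = 4.0,
    # equal here to the exact derivative H'(2) = 4; the error of centered
    # differences is of order dX**2, versus order dX for the non-centered
    # variant.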
    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Tangent calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
            X, dX = _paire
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            X, dX = paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Calculation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Adjoint calculation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
            X, Y = _paire
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            X, Y = paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Calculation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Calculation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
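#
# Illustrative usage sketch (editor's example, never called by the library;
# the operator and values are made up, and the Operator wrapper is assumed to
# accept a plain Python function, as the constructor above suggests):
def _exampleFDApproximation():
    "Approximate the Jacobian of a simple quadratic operator by centered FD"
    def QuadraticOperator( x ):
        x = numpy.ravel( x )
        return numpy.array([x[0]**2, x[0]*x[1]])
    FDA = FDApproximation( Function = QuadraticOperator, centeredDF = True )
    X0 = numpy.array([1., 2.])
    HX = FDA.DirectOperator( X0 )           # H(X0) = [1., 2.]
    J  = FDA.TangentOperator( (X0, None) )  # full 2x2 Jacobian at X0
    Jt = FDA.AdjointOperator( (X0, None) )  # its transpose
    return HX, J, Jt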
# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    #
    # Recovery of the initial data and information
    # --------------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Calculation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization calculations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids   = 1./(epsilon+numpy.abs(residus))
    veps    = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Necessary to reshape the matrix if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Caution: infinite loop to avoid if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Calculation of the quantile deviation
    # -------------------------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
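#
# Illustrative usage sketch (editor's example, never called by the library;
# the data, model and names below are made up): median regression (quantile
# 0.5) of a straight line y = a + b*x on noisy synthetic measurements.
def _exampleMMQR():
    xdata = numpy.linspace(0., 1., 50)
    def model(coeffs):
        return coeffs[0] + coeffs[1] * xdata
    def modelJacobian(coeffs):
        # Constant Jacobian of the linear model, one row per measurement
        return numpy.vstack((numpy.ones_like(xdata), xdata)).T
    ydata = model((1., 2.)) + 0.1*numpy.random.normal(size=xdata.size)
    coeffs, Ecart, infos = mmqr(
        func     = model,
        x0       = (0., 0.),
        fprime   = modelJacobian,
        quantile = 0.5,
        y        = ydata,
        )
    return coeffs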
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers-1 centered random states"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, according to
        the handwritten notes of MB and consistent with the code of PS with
        eps = -1
        """
        eps = -1
        Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfAnomalies( _ensemble, _optmean = None):
    "Returns the centered anomalies from an ensemble of size StateSize*NbOfMembers"
    if _optmean is None:
        Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
    else:
        Em = numpy.ravel(_optmean)[:,numpy.newaxis]
    #
    return numpy.asarray(_ensemble) - Em
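#
# Illustrative usage sketch (editor's example, never called by the library):
# draw an ensemble around a background state and check that the empirical
# covariance of its anomalies approaches the prescribed one.
def _exampleEnsemble():
    center = numpy.array([0., 10.])
    cov    = numpy.eye(2)
    E  = EnsembleOfCenteredPerturbations( center, cov, 500 ) # shape (2, 500)
    An = EnsembleOfAnomalies( E )                            # centered anomalies
    Pe = numpy.dot( An, An.T ) / (An.shape[1] - 1)           # empirical covariance, close to cov
    return Pe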
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        raise ValueError("Inflation factor has to be specified for inflation of type '%s'."%(InflationType,))
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("The ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
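#
# Illustrative usage sketch (editor's example, never called by the library;
# the positional use of the first argument follows the signature above):
def _exampleInflation():
    En = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.eye(3), 20 )
    # Multiplicative inflation of the anomalies by a factor 1.1 around the mean
    Ei = CovarianceInflation( En, "MultiplicativeOnBackgroundAnomalies", 1.1 )
    # Additive inflation of a covariance: (1-0.05)*P + 0.05*I
    Pi = CovarianceInflation( numpy.eye(3), "AdditiveOnBackgroundCovariance", 0.05 )
    return Ei, Pi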
# ==============================================================================
def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Wrapper: multi-step and multi-method 3DVAR
    """
    #
    # Initialization
    # --------------
    Xn = numpy.ravel(Xb).reshape((-1,1))
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedTo
        #
        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
            selfA.StoredVariables["Analysis"].store( Xn )
            if selfA._toStore("APosterioriCovariance"):
                if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
                else:                         Pn = B
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn )
    #
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-step loop
    # ---------------
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((-1,1))
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast
            Xn = selfA.StoredVariables["Analysis"][-1]
            Xn_predicted = M( Xn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
        #
        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
    #
    return 0
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR (Bouttier 1999, Courtier 1993)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    # ---------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Possible use of a precalculated H(Xb) vector
    # --------------------------------------------
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precalculation of the B and R inversions
    # ----------------------------------------
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    # ----------------------------------
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X = numpy.asmatrix(numpy.ravel( x )).T
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BI * (_X - Xb)
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional calculations and/or storages
    # ---------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
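#
# Editor's note on the scheme above: std3dvar minimizes the classical 3DVAR
# cost function
#     J(x) = 1/2 (x-xb)' B^{-1} (x-xb) + 1/2 (y-H(x))' R^{-1} (y-H(x))
# whose gradient is
#     grad J(x) = B^{-1} (x-xb) - H'(x)^T R^{-1} (y-H(x)),
# which is exactly what CostFunction and GradientOfCostFunction implement,
# the adjoint Ha providing the product by H'(x)^T.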
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B (Huang 2000)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    # ---------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precalculation of the B transposition and R inversion
    # ------------------------------------------------------
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    # ----------------------------------
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional calculations and/or storages
    # ---------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T # H(Xb) is not precalculated in this variant
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
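#
# Editor's note on the scheme above: van3dvar works on the control variable v
# defined by x = xb + B v, so that the background term becomes
#     Jb(v) = 1/2 v' B' v
# (equal to 1/2 v' B v for symmetric B), and only R has to be inverted during
# the minimization, following Huang 2000. The analysis is recovered at the end
# through Xa = Xb + B*v_optimal.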
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR (Courtier 1994, 1997)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Initializations
    # ---------------
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precalculation of the B and R inversions
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    # ----------
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Update of the tangent operator at the current point
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
        def CostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht @ _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        #
        def GradientOfCostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX = Ht @ _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb = BI * _dX
            GradJo = - Ht.T @ (RI * _dInnovation)
            GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the cost functional
        # -----------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        J      = selfA.StoredVariables["CostFunctionJ" ][-1] # update the outer-loop cost for the next decrement test
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Calculation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is done only in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional calculations and/or storages
    # ---------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1507 if selfA._toStore("SimulationQuantiles"):
1508 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1509 HXa = numpy.matrix(numpy.ravel( HXa )).T
1511 for i in range(nech):
1512 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1513 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1514 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1516 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1517 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1518 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1522 YfQ = numpy.hstack((YfQ,Yr))
1525 for quantile in selfA._parameters["Quantiles"]:
1526 if not (0. <= float(quantile) <= 1.): continue
1527 indice = int(nech * float(quantile) - 1./nech)
1528 if YQ is None: YQ = YfQ[:,indice]
1529 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1530 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1531 if selfA._toStore("SimulatedObservationAtBackground"):
1532 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1533 if selfA._toStore("SimulatedObservationAtOptimum"):
1534 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
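
# ==============================================================================
# Illustrative sketch, not called by the algorithm above: the
# "SimulationQuantiles" block just ranks an ensemble of simulated observations
# (one column per sample) and extracts, for each requested quantile q in
# [0,1], the column of rank int(nech*q - 1./nech). A standalone NumPy
# equivalent, in which every name is local to this hypothetical example:
def _empirical_quantiles_sketch(YfQ, quantiles):
    "Columns of the sample-sorted ensemble YfQ at the requested quantiles"
    YfQ  = numpy.sort(numpy.asarray(YfQ), axis=1) # Sort each row over the samples
    nech = YfQ.shape[1]
    YQ   = None
    for quantile in quantiles:
        if not (0. <= float(quantile) <= 1.): continue # Skip invalid requests
        indice = int(nech * float(quantile) - 1./nech) # Same rank rule as above
        if YQ is None: YQ = YfQ[:,indice,None]
        else:          YQ = numpy.hstack((YQ, YfQ[:,indice,None]))
    return YQ
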
# ==============================================================================
def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR PSAS (Huang 2000)

    selfA is identical to the "self" of the calling algorithm, and carries
    the required attributes and parameters.
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    #
    # Possible use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    Ht = HO["Tangent"].asMatrix(Xb)
    BHT = B * Ht.T
    HBHTpR = R + Ht * BHT
    Innovation = Y - HXb
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
        #
        Jb  = float( 0.5 * _W.T * HBHTpR * _W )
        Jo  = float( - _W.T * Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(w):
        _W = numpy.asmatrix(numpy.ravel( w )).T
        GradJb = HBHTpR * _W
        GradJo = - Innovation
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
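    # Note on the dual ("PSAS") formulation minimized below, stated as a
    # hedged reminder: with the innovation d = Y - H(Xb), the cost function
    # in observation space is
    #     J(w) = 0.5 * w'*(H*B*H' + R)*w - w'*d
    # whose stationarity condition is (H*B*H' + R)*w = d, so the state-space
    # analysis Xa = Xb + B*H'*w coincides with the usual 3DVAR/BLUE analysis;
    # this is why GradientOfCostFunction above returns HBHTpR * _W - Innovation.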
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computing the analysis error covariance
    # ---------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        BI = B.getI()
        RI = R.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it must be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
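
# ==============================================================================
# Minimal standalone sketch of the PSAS analysis with explicit dense matrices,
# mirroring psas3dvar above for a purely linear observation operator given as
# a matrix Hm; all names here are hypothetical and the problem is assumed
# small enough for dense solves.
def _psas_analysis_sketch(Xb, B, Hm, R, y):
    "Dual-space analysis: solve (Hm*B*Hm' + R)*w = d, then Xa = Xb + B*Hm'*w"
    d   = y - Hm @ Xb                          # Innovation vector
    BHT = B @ Hm.T                             # Back-projection B*H'
    w   = numpy.linalg.solve(Hm @ BHT + R, d)  # Observation-space weights
    return Xb + BHT @ w                        # State-space analysis
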
# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Stochastic EnKF (Evensen 1994, Burgers 1998)

    selfA is identical to the "self" of the calling algorithm, and carries
    the required attributes and parameters.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the B and R inverses
    # --------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
            Xn_predicted = EMX + qi
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also part of M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula05":
            PfHT, HPfHT = 0., 0.
            for i in range(__m):
                Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
                Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
                PfHT  += Exfi * Eyfi.T
                HPfHT += Eyfi * Eyfi.T
            PfHT  = (1./(__m-1)) * PfHT
            HPfHT = (1./(__m-1)) * HPfHT
            Kn = PfHT * ( R + HPfHT ).I
            #
            for i in range(__m):
                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
        #--------------------------
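        # Hedged note on the update just above: every member i is moved by the
        # same ensemble gain K = Pf*H' * (H*Pf*H' + R)^-1, but receives its own
        # perturbed observation y + ri with ri ~ N(0,R); this is the
        # "perturbed observations" stochastic analysis of Evensen 1994 and
        # Burgers 1998, which keeps the analysis ensemble spread consistent
        # with the Kalman a posteriori covariance in the linear Gaussian limit.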
        elif VariantM == "KalmanFilterFormula16":
            EpY  = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
            #
            EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
            EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
            #
            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
            #
            for i in range(__m):
                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("APosterioriCovariance"):
                Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
                Pn  = Eai @ Eai.T
                Pn  = 0.5 * (Pn + Pn.T)
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._parameters["EstimationOf"] == "Parameters" \
                and J < previousJMinimum:
                previousJMinimum    = J
                XaMin               = Xa
                if selfA._toStore("APosterioriCovariance"):
                    covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
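
# ==============================================================================
# Minimal standalone sketch of one stochastic EnKF analysis step on plain
# NumPy arrays, assuming a linear observation operator given as a matrix Hm
# and a full observation covariance R; it mirrors the gain construction of
# the "KalmanFilterFormula05" branch of senkf above, with every name here
# being hypothetical.
def _senkf_analysis_sketch(Ef, Hm, R, y):
    "One perturbed-observations analysis of the forecast ensemble Ef (n x m)"
    __n, __m = Ef.shape
    Xf  = Ef.mean(axis=1, keepdims=True)
    Ax  = (Ef - Xf) / numpy.sqrt(__m-1)                # State anomalies
    HEf = Hm @ Ef
    Ay  = (HEf - HEf.mean(axis=1, keepdims=True)) / numpy.sqrt(__m-1)
    K   = Ax @ Ay.T @ numpy.linalg.inv(Ay @ Ay.T + R)  # Ensemble Kalman gain
    Ea  = numpy.empty_like(Ef)
    for i in range(__m):
        ri = numpy.random.multivariate_normal(numpy.zeros(len(y)), R)
        Ea[:,i] = Ef[:,i] + K @ (y + ri - HEf[:,i])    # Member-wise update
    return Ea
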
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)

    selfA is identical to the "self" of the calling algorithm, and carries
    the required attributes and parameters.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the B and R inverses
    # --------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.choleskyI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
            Xn_predicted = EMX + qi
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also part of M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted )
        EaHX = numpy.array(HX_predicted - Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / numpy.sqrt(__m-1)
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
            vw    = mT @ mS.transpose() @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.eye(__m)
            #
            EaX   = EaX / numpy.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
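        # Hedged note on the transform just applied: with the scaled observed
        # anomalies S = R^(-1/2) * EaHX / sqrt(m-1), the mean weights solve
        #     (I + S'*S) * w = S' * R^(-1/2) * (y - H(xf))
        # and the anomalies are right-multiplied by T^(1/2) = (I + S'*S)^(-1/2),
        # which is the deterministic square-root update of Bishop 2001 and
        # Hunt 2007.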
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = (__m-1) * numpy.eye(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("APosterioriCovariance"):
                Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
                Pn  = Eai @ Eai.T
                Pn  = 0.5 * (Pn + Pn.T)
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._parameters["EstimationOf"] == "Parameters" \
                and J < previousJMinimum:
                previousJMinimum    = J
                XaMin               = Xa
                if selfA._toStore("APosterioriCovariance"):
                    covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
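
# ==============================================================================
# Minimal standalone sketch of one deterministic ETKF analysis step on plain
# NumPy arrays, mirroring the "KalmanFilterFormula" branch of etkf above for
# an observation operator given as a matrix Hm; all names are hypothetical
# and R is assumed full, symmetric and positive-definite.
def _etkf_analysis_sketch(Ef, Hm, R, y):
    "One square-root (transform) analysis of the forecast ensemble Ef (n x m)"
    import scipy.linalg # Local import to keep this sketch self-contained
    __n, __m = Ef.shape
    Xf     = Ef.mean(axis=1, keepdims=True)
    EaX    = (Ef - Xf) / numpy.sqrt(__m-1)            # State anomalies
    HEf    = Hm @ Ef
    EaHX   = (HEf - HEf.mean(axis=1, keepdims=True)) / numpy.sqrt(__m-1)
    RIdemi = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(R)))
    mS     = RIdemi @ EaHX                            # Scaled observed anomalies
    delta  = RIdemi @ (y.reshape((-1,1)) - HEf.mean(axis=1, keepdims=True))
    mT     = numpy.linalg.inv(numpy.eye(__m) + mS.T @ mS)
    vw     = mT @ mS.T @ delta                        # Weights of the mean update
    Tdemi  = numpy.real(scipy.linalg.sqrtm(mT))       # Square root of the transform
    return Xf + EaX @ (vw + numpy.sqrt(__m-1) * Tdemi)
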
# ==============================================================================
def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)

    selfA is identical to the "self" of the calling algorithm, and carries
    the required attributes and parameters.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the B and R inverses
    # --------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
            Xn_predicted = EMX + qi
            if Cm is not None and Un is not None: # Warning: if Cm is also part of M, it is applied twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # ---> By principle, M = Id, Q = 0
            Xn_predicted = Xn
        #
        #--------------------------
        if VariantM == "MLEF13":
            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
            Ua  = numpy.eye(__m)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.eye(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
                #
                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
                mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
            #
            Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("APosterioriCovariance"):
                Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
                Pn  = Eai @ Eai.T
                Pn  = 0.5 * (Pn + Pn.T)
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._parameters["EstimationOf"] == "Parameters" \
                and J < previousJMinimum:
                previousJMinimum    = J
                XaMin               = Xa
                if selfA._toStore("APosterioriCovariance"):
                    covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
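
# ==============================================================================
# Hedged sketch of a single weight update of the mlef iteration above, in
# ensemble weight space, assuming precomputed observed anomalies EaY (p x m),
# an explicit inverse observation covariance RI (p x p), and column vectors
# y and vy of shape (p,1); all names are local to this illustrative example.
def _mlef_weight_step_sketch(vw, EaY, RI, y, vy):
    "One Gauss-Newton update of the weight vector vw of size m"
    __m    = vw.size
    GradJ  = numpy.ravel(vw[:,None] - EaY.T @ (RI @ (y - vy))) # Gradient of J(w)
    mH     = numpy.eye(__m) + EaY.T @ (RI @ EaY)               # Approximate Hessian
    Deltaw = - numpy.linalg.solve(mH, GradJ)                   # Newton increment
    return vw + Deltaw
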
# ==============================================================================
def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Iterative EnKF (Sakov 2012, Sakov 2018)

    selfA is identical to the "self" of the calling algorithm, and carries
    the required attributes and parameters.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precomputation of the B and R inverses
    # --------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps  = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.eye(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # ---> By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
                vy1 = H((vx2, Un)).reshape((__p,-1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( E2 )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( E2 - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("APosterioriCovariance"):
                Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
                Pn  = Eai @ Eai.T
                Pn  = 0.5 * (Pn + Pn.T)
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._parameters["EstimationOf"] == "Parameters" \
                and J < previousJMinimum:
                previousJMinimum    = J
                XaMin               = Xa
                if selfA._toStore("APosterioriCovariance"):
                    covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
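
# ==============================================================================
# Hedged sketch of the multiplicative covariance inflation applied through the
# CovarianceInflation helper used above (the helper itself is defined
# elsewhere in this module): anomalies with respect to the ensemble mean are
# simply rescaled by a factor slightly greater than 1 to counteract spread
# underestimation. A plain-array illustration, with hypothetical names:
def _multiplicative_inflation_sketch(E, factor=1.02):
    "Rescale the anomalies of the ensemble E (one member per column)"
    Xm = E.mean(axis=1, keepdims=True) # Ensemble mean, kept unchanged
    return Xm + factor * (E - Xm)      # Inflated members
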
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')