# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
__doc__ = """
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
    # Temporarily prepend the user directory to the import path
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX = __fonction( __X )
    return numpy.ravel( __HX )
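
# Illustrative sketch: ExecuteFunction is meant to be driven by mapping
# (X, funcrepr) pairs over a multiprocessing pool, as done in TangentMatrix
# below. The hypothetical helper here shows only the Pool.map pattern, with a
# plain picklable function standing in for the dynamic import machinery.
def _demoExecuteFunctionMapping():
    "Hypothetical sketch of the Pool.map pattern used with ExecuteFunction."
    import multiprocessing
    _jobs = [numpy.array([float(i), 2.*float(i)]) for i in range(4)]
    with multiprocessing.Pool(2) as pool:
        _HXs = pool.map( numpy.sum, _jobs ) # numpy.sum stands in for ExecuteFunction
    return _HXs # [0.0, 3.0, 6.0, 9.0]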
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    an object is created by supplying a function "Function", one obtains an
    object providing the three methods "DirectOperator", "TangentOperator"
    and "AdjointOperator". The finite-difference (FD) approximation is
    controlled either by the multiplicative increment "increment", 1% by
    default, or by the fixed increment "dX", which is then multiplied by
    "increment" (hence in %), and centered FD are used if the boolean
    "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations : %s (number of processes : %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations : %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicates detection : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Direct computation using the supplied function.
        """
        logging.debug("FDA Computing DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian by finite differences,
        i.e. the gradient of H at X. Directional finite differences are used
        around the point X. X is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], building X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by (2*dXi)
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by dXi
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Start of Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Computing Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Computing Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        _X_moins_dXi = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                    }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi = _dX[i]
                        _X_plus_dXi = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_dX) )
        #
        logging.debug("FDA End of Jacobian computation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Tangent computation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Return the matrix form if the second argument is None
            # ------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Return the linearized value of H at X applied to dX
            # ----------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Adjoint computation using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Return the transposed matrix form if the second argument is None
            # -----------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Return the value of the adjoint at X applied to Y
            # --------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
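
# Illustrative sketch: the centered-difference formula implemented by
# TangentMatrix above, written out in plain numpy for a quadratic test
# function. The names below are hypothetical and independent of the class
# machinery; they only demonstrate the second-order FD scheme.
def _demoCenteredFiniteDifferences():
    "Hypothetical sketch: second-order Jacobian approximation by centered FD."
    _f = lambda x: numpy.array([x[0]**2 + x[1], 3.*x[1]])
    _X, _h = numpy.array([1., 2.]), 1.e-6
    _J = numpy.empty((2,2))
    for i in range(2):
        _Xp, _Xm = _X.copy(), _X.copy()
        _Xp[i] += _h ; _Xm[i] -= _h
        _J[:,i] = (_f(_Xp) - _f(_Xm)) / (2.*_h) # column i of the Jacobian
    # The exact Jacobian at (1,2) is [[2,1],[0,3]], matched here to ~1.e-9
    return _J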
# ==============================================================================
def mmqr(func=None, x0=None, fprime=None, bounds=None, quantile=0.5, maxfun=15000, toler=1.e-06, y=None):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    #
    # Recover initial data and information
    # ------------------------------------
    variables = numpy.ravel( x0 )
    mesures = numpy.ravel( y )
    increment = sys.float_info[0]
    p = variables.size
    n = mesures.size
    quantile = float(quantile)
    #
    # Compute the MM parameters
    # -------------------------
    tn = float(toler) / n
    e0 = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids = 1./(epsilon+numpy.abs(residus))
    veps = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees = numpy.array(fprime(variables))
        Derivees = Derivees.reshape(n,p) # Necessary to reshape the matrix if it goes through YACS pipes
        DeriveesT = Derivees.transpose()
        M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Caution: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step = step/2.
                variables = variables - step
        residus = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step = step/2.
            variables = variables - step
            residus = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment = lastsurrogate-surrogate
        poids = 1./(epsilon+numpy.abs(residus))
        veps = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
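
# Illustrative sketch: calling mmqr on a constant model, whose quantile-0.5
# fit should approach the sample median. The model and derivative lambdas are
# hypothetical, and the call assumes the keyword signature reconstructed above.
def _demoMMQRMedianFit():
    "Hypothetical sketch: median regression of a constant model with mmqr."
    _y = numpy.array([1., 2., 10.])
    _func = lambda v: numpy.ravel(v)[0] * numpy.ones(3) # constant model
    _fprime = lambda v: numpy.ones((3,1)) # its n x p derivative matrix
    _v, _ecart, _infos = mmqr(func=_func, x0=numpy.array([0.]), fprime=_fprime, quantile=0.5, y=_y)
    return _v # expected close to the median of _y, i.e. 2.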
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
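
# Illustrative sketch: for a large ensemble, the empirical mean of the states
# generated by EnsembleOfCenteredPerturbations should stay close to the
# prescribed center. The helper name and sizes below are hypothetical.
def _demoCenteredPerturbations():
    "Hypothetical sketch: statistical check of EnsembleOfCenteredPerturbations."
    _center, _cov = numpy.array([1., 2.]), numpy.eye(2)
    _E = EnsembleOfCenteredPerturbations( _center, _cov, 10000 ) # shape (2, 10000)
    return _E.mean(axis=1) # expected close to [1., 2.]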
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers random states centered on _bgcenter"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following
        MB's handwritten notes and consistent with PS's code with eps = -1
        """
        eps = -1
        Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
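
# Illustrative sketch: the anomalies produced by the SVD-based generation are
# centered by construction, so the ensemble mean should reproduce _bgcenter up
# to roundoff. The helper name and sizes below are hypothetical.
def _demoBackgroundPerturbations():
    "Hypothetical sketch: the generated ensemble mean matches the center."
    _center, _cov = numpy.zeros(3), numpy.diag([1., 2., 3.])
    _E = EnsembleOfBackgroundPerturbations( _center, _cov, 5 ) # shape (3, 5)
    return _E.mean(axis=1) # expected close to zero by construction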
# ==============================================================================
def EnsembleOfAnomalies( _ensemble, _optmean = None):
    "Returns the centered anomalies from a StateSize*NbOfMembers ensemble"
    if _optmean is None:
        Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
    else:
        Em = numpy.ravel(_optmean)[:,numpy.newaxis]
    #
    return numpy.asarray(_ensemble) - Em
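
# Illustrative sketch: anomalies computed about the empirical mean sum to zero
# along the member axis, component by component. The helper is hypothetical.
def _demoEnsembleOfAnomalies():
    "Hypothetical sketch: row sums of centered anomalies vanish."
    _E = numpy.array([[1., 2., 3.], [4., 5., 6.]]) # StateSize=2, 3 members
    _A = EnsembleOfAnomalies( _E )
    return _A.sum(axis=1) # expected [0., 0.] up to roundoff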
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnBackgroundCovariance", "AdditiveOnAnalysisCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of same size as background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
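
# Illustrative sketch: multiplicative inflation of anomalies dilates the spread
# around the ensemble mean while leaving the mean itself unchanged. The helper
# name and the toy ensemble are hypothetical.
def _demoMultiplicativeInflation():
    "Hypothetical sketch: mean-preserving inflation of ensemble anomalies."
    _E = numpy.array([[1., 2., 3.], [4., 5., 6.]])
    _Ei = CovarianceInflation( _E, "MultiplicativeOnBackgroundAnomalies", 2. )
    return _E.mean(axis=1), _Ei.mean(axis=1) # both expected equal: [2., 5.]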
# ==============================================================================
def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Stochastic EnKF (Evensen 1994, Burgers 1998)

    selfA is identical to the "self" of the calling algorithm and holds its
    variables.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps = len(selfA.StoredVariables["CostFunctionJ"])
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,-1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
            Xn_predicted = EMX + qi
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this is a duplicate!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula05":
            PfHT, HPfHT = 0., 0.
            for i in range(__m):
                Exfi = Xn_predicted[:,i] - Xfm
                Eyfi = HX_predicted[:,i] - Hfm
                PfHT += Exfi * Eyfi.T
                HPfHT += Eyfi * Eyfi.T
            PfHT = (1./(__m-1)) * PfHT
            HPfHT = (1./(__m-1)) * HPfHT
            Kn = PfHT * ( R + HPfHT ).I
            del PfHT, HPfHT
            #
            for i in range(__m):
                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
                Xn[:,i] = Xn_predicted[:,i] + numpy.ravel(Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])).T
        #--------------------------
        elif VariantM == "KalmanFilterFormula16":
            EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
            EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
            #
            EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
            EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
            #
            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T )
            #
            for i in range(__m):
                Xn[:,i] = Xn_predicted[:,i] + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
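
# Illustrative sketch: the stochastic EnKF analysis step in plain numpy, with
# a gain built from ensemble covariances as in the "KalmanFilterFormula05"
# variant above. All names are hypothetical; Hm is a linear observation
# operator, so no ADAO operator wrapping is involved.
def _demoStochasticEnKFStep():
    "Hypothetical sketch: one stochastic EnKF analysis with perturbed observations."
    numpy.random.seed(123)
    __n, __p, __m = 3, 2, 50
    Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]]) # linear observation operator
    Rd = 0.1 * numpy.eye(__p) # observation error covariance
    Ef = numpy.random.multivariate_normal(numpy.zeros(__n), numpy.eye(__n), __m).T
    yo = numpy.array([1., -1.])
    Exf = Ef - Ef.mean(axis=1, keepdims=True) # forecast anomalies
    Eyf = Hm @ Exf # observed anomalies
    PfHT = Exf @ Eyf.T / (__m-1)
    HPfHT = Eyf @ Eyf.T / (__m-1)
    Kn = PfHT @ numpy.linalg.inv( Rd + HPfHT ) # ensemble Kalman gain
    Ea = Ef.copy()
    for i in range(__m): # perturbed observations, one draw per member
        ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rd)
        Ea[:,i] = Ef[:,i] + Kn @ (yo + ri - Hm @ Ef[:,i])
    return Ea.mean(axis=1) # analysis mean, drawn towards yo on observed components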
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)

    selfA is identical to the "self" of the calling algorithm and holds its
    variables.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.choleskyI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = numpy.asmatrix(numpy.dot( Xb.reshape((__n,1)), numpy.ones((1,__m)) ))
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps = len(selfA.StoredVariables["CostFunctionJ"])
    #
    Xn_predicted = numpy.asmatrix(numpy.zeros((__n,__m)))
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
        else:
            Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)], argsAsSerie = True )
            for i in range(__m):
                qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn)
                Xn_predicted[:,i] = (numpy.ravel( EMX[i] ) + qi).reshape((__n,-1))
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this is a duplicate!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float')
        Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float')
        #
        # Anomalies
        EaX = numpy.matrix(Xn_predicted - Xfm.reshape((__n,-1)))
        EaHX = numpy.matrix(HX_predicted - Hfm.reshape((__p,-1)))
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            EaX = EaX / numpy.sqrt(__m-1)
            mS = RIdemi * EaHX / numpy.sqrt(__m-1)
            delta = RIdemi * ( Ynpu.reshape((__p,-1)) - Hfm.reshape((__p,-1)) )
            mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
            vw = mT @ mS.transpose() @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU = numpy.eye(__m)
            #
            Xn = Xfm.reshape((__n,-1)) + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm, Un)) # Hfm could possibly be used instead
            def CostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T * RI * _A
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T * RI * _A
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T * RI * EaHX
            Htb = (__m-1) * numpy.eye(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm, Un)) # Hfm could possibly be used instead
            def CostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T * RI * _A
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T * RI * _A
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T * RI * EaHX
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm, Un)) # Hfm could possibly be used instead
            def CostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T * RI * _A
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T * RI * _A
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T * RI * EaHX
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm, Un)) # Hfm could possibly be used instead
            def CostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _Jo = 0.5 * _A.T * RI * _A
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A = Ynpu.reshape((__p,-1)) - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
                _GradJo = - EaHX.T * RI * _A
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f = CostFunction,
                x0 = numpy.zeros(__m),
                fprime = GradientOfCostFunction,
                args = (),
                disp = False,
                )
            #
            Hto = EaHX.T * RI * EaHX
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn = Xfm.reshape((__n,-1)) + EaX @ (vw.reshape((__m,-1)) + EWa)
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float')
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = (1/numpy.sqrt(__m-1)) * (Xn - Xa.reshape((__n,-1))) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
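
# Illustrative sketch: the deterministic ETKF transform of the
# "KalmanFilterFormula" variant above, written in plain numpy with R = I so
# that RIdemi is the identity. All names are hypothetical.
def _demoETKFTransform():
    "Hypothetical sketch: ensemble-space ETKF update with unit R."
    numpy.random.seed(123)
    __n, __p, __m = 3, 2, 10
    Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]])
    Ef = numpy.random.multivariate_normal(numpy.zeros(__n), numpy.eye(__n), __m).T
    yo = numpy.array([1., -1.]).reshape((__p,1))
    xf = Ef.mean(axis=1, keepdims=True)
    EaX = (Ef - xf) / numpy.sqrt(__m-1) # normalized forecast anomalies
    mS = Hm @ EaX # plays the role of RIdemi * EaHX with RIdemi = identity
    delta = yo - Hm @ xf
    mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
    vw = mT @ mS.T @ delta # analysis weights in ensemble space
    Tdemi = numpy.real(scipy.linalg.sqrtm(mT)) # square-root transform
    Xa = xf + EaX @ ( vw + numpy.sqrt(__m-1) * Tdemi )
    return Xa.mean(axis=1) # analysis ensemble mean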
# ==============================================================================
def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)

    selfA is identical to the "self" of the calling algorithm and holds its
    variables.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    previousJMinimum = numpy.finfo(float).max
    nbPreviousSteps = len(selfA.StoredVariables["CostFunctionJ"])
    #
    Xn_predicted = numpy.zeros((__n,__m))
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] )[:,None]
        else:
            Ynpu = numpy.ravel( Y )[:,None]
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i,numpy.newaxis], Un) for i in range(__m)], argsAsSerie = True )
            for i in range(__m):
                qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn)
                Xn_predicted[:,i] = numpy.ravel( EMX[i] ) + qi
            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this is a duplicate!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        #--------------------------
        if VariantM == "MLEF13":
            Xfm = numpy.asarray(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
            EaX = numpy.asarray((Xn_predicted - Xfm.reshape((__n,-1))) / numpy.sqrt(__m-1))
            Ua = numpy.eye(__m)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.eye(__m)
            vw = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = numpy.ravel(Xfm) + EaX @ vw
                if BnotT:
                    E1 = vx1.reshape((__n,-1)) + _epsilon * EaX
                else:
                    E1 = vx1.reshape((__n,-1)) + numpy.sqrt(__m-1) * EaX @ Ta
                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
                mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                vw = vw + Deltaw
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                __j = __j + 1
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
            Xn = vx1.reshape((__n,-1)) + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        #~ if selfA._toStore("InnovationAtCurrentState"):
        #~     selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        #~ if selfA._toStore("SimulatedObservationAtCurrentState") \
        #~     or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
        #~     selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> with current J
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = numpy.asarray((Xn - Xa.reshape((__n,-1))) / numpy.sqrt(__m-1)) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
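
# Illustrative sketch: the ensemble-space Gauss-Newton iteration at the core
# of the MLEF13 variant above, for a linear observation operator and R = I, so
# that the loop converges in one step. All names are hypothetical.
def _demoEnsembleGaussNewton():
    "Hypothetical sketch: iterative minimisation in ensemble weight space."
    numpy.random.seed(123)
    __n, __p, __m = 3, 2, 10
    Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]])
    Ef = numpy.random.multivariate_normal(numpy.zeros(__n), numpy.eye(__n), __m).T
    yo = numpy.array([1., -1.]).reshape((__p,1))
    xf = Ef.mean(axis=1, keepdims=True)
    EaX = (Ef - xf) / numpy.sqrt(__m-1) # normalized forecast anomalies
    vw, Deltaw, __j = numpy.zeros((__m,1)), 1., 0
    while numpy.linalg.norm(Deltaw) >= 1.e-7 and __j <= 100:
        vx1 = xf + EaX @ vw # current state estimate
        EaY = Hm @ EaX # exact observed anomalies for a linear operator
        GradJ = vw - EaY.T @ (yo - Hm @ vx1)
        mH = numpy.eye(__m) + EaY.T @ EaY # Gauss-Newton Hessian, with R = I
        Deltaw = - numpy.linalg.solve(mH, GradJ)
        vw = vw + Deltaw
        __j = __j + 1
    return (xf + EaX @ vw).ravel() # maximum-likelihood analysis mean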
# ==============================================================================
def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Iterative EnKF (Sakov 2012, Sakov 2018)

    selfA is identical to the "self" of the calling algorithm and holds its
    variables.
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps identical to the number of observation steps
    # -------------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inverses of B and R
    # ----------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    #
    # Initialization
    # --------------
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
1548 previousJMinimum = numpy.finfo(float).max
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] )[:,None]
        else:
            Ynpu = numpy.ravel( Y )[:,None]
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.asarray(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = numpy.asarray((Xn - Xfm.reshape((__n,-1))) / numpy.sqrt(__m-1))
            # EaX = EnsembleCenteredAnomalies( Xn ) / numpy.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta  = numpy.eye(__m)
            vw  = numpy.zeros(__m)
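            # Gauss-Newton loop on the weight vector "vw": each pass rebuilds
            # the ensemble from the current weights, propagates and observes
            # it, then solves the linearized system "mH @ Deltaw = -GradJ"
            # until the increment "Deltaw" is small enough or "_jmax" is hit.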
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = numpy.ravel(Xfm) + EaX @ vw
                #
                if BnotT:
                    E1 = vx1.reshape((__n,-1)) + _epsilon * EaX
                else:
                    E1 = vx1.reshape((__n,-1)) + numpy.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # --- > By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
                vy1 = H((vx2, Un)).reshape((__p,-1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleCenteredAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        #~ if selfA._toStore("ForecastState"):
        #~     selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        #~ if selfA._toStore("BMA"):
        #~     selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
        #~ if selfA._toStore("InnovationAtCurrentState"):
        #~     selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,-1)) )
        #~ if selfA._toStore("SimulatedObservationAtCurrentState") \
        #~     or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
        #~     selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            Eai = numpy.asarray((Xn - Xa.reshape((__n,-1))) / numpy.sqrt(__m-1)) # Anomalies
            Pn = Eai @ Eai.T
            Pn = 0.5 * (Pn + Pn.T)
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum, for parameter estimation
    # ------------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
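    #
    # Illustrative sketch only, not part of the library API: a minimal,
    # self-contained transform-variant weight iteration on a toy linear
    # problem, mirroring the "IEnKF12" loop of "ienkf" above. All names below
    # (toy_M, toy_H, the sizes, the covariance) are local assumptions chosen
    # for this autodiagnostic.
    rng = numpy.random.RandomState(1234)
    __n, __m = 3, 5                                # toy state and ensemble sizes
    toy_M = lambda x: 0.95 * x                     # hypothetical linear evolution model
    toy_H = lambda x: x                            # hypothetical observation operator
    RI = numpy.linalg.inv( 0.01 * numpy.eye(__n) ) # inverse observation covariance
    Ynpu = numpy.array([[1.], [2.], [3.]])         # synthetic observation
    Xn  = rng.standard_normal((__n,__m))           # background ensemble
    Xfm = Xn.mean(axis=1).reshape((__n,-1))
    EaX = (Xn - Xfm) / numpy.sqrt(__m-1)           # scaled background anomalies
    vw, Deltaw, Ta = numpy.zeros(__m), numpy.ones(__m), numpy.eye(__m)
    __j = 0
    while numpy.linalg.norm(Deltaw) >= 1.e-7 and __j <= 100:
        vx1 = Xfm + EaX @ vw[:,None]               # current estimate of the mean
        E2  = toy_M( vx1 + numpy.sqrt(__m-1) * EaX @ Ta )
        vx2 = E2.mean(axis=1).reshape((__n,-1))
        HE2 = toy_H( E2 )
        vy1 = toy_H( vx2 )
        vy2 = HE2.mean(axis=1).reshape((__n,-1))
        EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
        GradJ  = numpy.ravel( vw[:,None] - EaY.T @ (RI @ (Ynpu - vy1)) )
        mH     = numpy.eye(__m) + EaY.T @ (RI @ EaY)
        Deltaw = - numpy.linalg.solve(mH, GradJ)   # Gauss-Newton step in weight space
        vw = vw + Deltaw
        Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
        __j += 1
    print('  Toy IEnKF12 sketch: converged in %i iteration(s)'%__j)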