# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    __HX = __fonction( __X )
    return numpy.ravel( __HX )
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    an object is created by providing a function "Function", one obtains an
    object with 3 methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD (finite difference) approximation is controlled
    either by the multiplicative increment "increment", 1% by default, or by
    the fixed increment "dX", which is multiplied by "increment" (hence in %);
    centered finite differences are used if the boolean "centeredDF" is true.
    """
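    # Usage sketch (illustrative only; "userFunction" stands for an assumed
    # user-supplied callable mapping a vector to a vector):
    #     FDA = FDApproximation( Function = userFunction, increment = 0.01 )
    #     HX  = FDA.DirectOperator( X )          # value H(X)
    #     HtX = FDA.TangentOperator( (X, dX) )   # tangent at X applied to dX
    #     HaY = FDA.AdjointOperator( (X, Y) )    # adjoint at X applied to Y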
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X ):
        """
        Computation of the direct operator using the provided function.
        """
        logging.debug("FDA Computing DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X, given as a numpy.matrix.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided
           by the step 2*dXi
        3/ Each result, component-wise, becomes a column of the Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by
           the step dXi
        4/ Each result, component-wise, becomes a column of the Jacobian
        """
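        # In matrix form (sketch): with e_i the i-th canonical basis vector,
        # column i of the Jacobian computed below is
        #     centered    : J[:,i] = ( H(X + dX[i]*e_i) - H(X - dX[i]*e_i) ) / (2*dX[i])
        #     non-centered: J[:,i] = ( H(X + dX[i]*e_i) - H(X) ) / dX[i]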
        logging.debug("FDA Starting the Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives cannot be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA Computing the Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA Computing the Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                        _jobs.append( (_X_moins_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of the Jacobian computation")
        #
        return _Jacobienne
    # ---------------------------------------------------------
    def TangentOperator(self, paire ):
        """
        Computation of the tangent operator using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire ):
        """
        Computation of the adjoint operator using the Jacobian.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
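    # Consistency sketch (illustrative): since the adjoint is the transpose of
    # the finite-difference Jacobian, the duality relation
    #     < TangentOperator((X,dX)), Y > == < dX, AdjointOperator((X,Y)) >
    # holds exactly for these approximated operators.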
# ==============================================================================
def mmqr(func, x0, fprime, bounds, quantile, maxfun, toler, y):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    # Retrieval of initial data and information
    # -----------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0] # i.e. sys.float_info.max
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids   = 1./(epsilon+numpy.abs(residus))
    veps    = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Needed to rebuild the matrix if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Caution: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
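# Usage sketch for mmqr (illustrative; "f" and "fp" stand for assumed model
# and derivative callables):
#     xopt, ecart, infos = mmqr( f, x0, fp, None, 0.5, 15000, 1.e-06, y )
# With quantile = 0.5 this amounts to a median (L1-type) regression fit.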
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of size _nbmembers-1 of centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
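# Example sketch (illustrative): drawing a 100-member ensemble around a state
# "xb" with a covariance matrix "B", assumed consistent in dimension:
#     E = EnsembleOfCenteredPerturbations( xb, B, 100 )   # shape (xb.size, 100)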
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of size _nbmembers-1 of centered random states"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB, consistent with the code of PS with eps = -1
        """
        eps = -1
        Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfAnomalies( _ensemble, _optmean = None):
    "Returns the centered anomalies from an ensemble of size StateSize*NbOfMembers"
    if _optmean is None:
        Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
    else:
        Em = numpy.ravel(_optmean)[:,numpy.newaxis]
    #
    return numpy.asarray(_ensemble) - Em
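# Sketch: with the default _optmean and E of shape (n, m), the result equals
# E - mean(E, axis=1), so the returned anomalies sum to zero member-wise.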
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa.

    Summary: Hunt 2007, section 2.3.5.
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
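# Example sketch (illustrative): 2% multiplicative inflation of the analysis
# anomalies EXa around their ensemble mean:
#     EXa = CovarianceInflation( EXa, "MultiplicativeOnAnalysisAnomalies", 1.02 )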
# ==============================================================================
def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Wrapper: multi-step and multi-method 3DVAR
    """
    #
    # Initialization
    # --------------
    Xn = numpy.ravel(Xb).reshape((-1,1))
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedTo
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xn )
        if selfA._toStore("APosterioriCovariance"):
            if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
            else:                         Pn = B
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( Xn )
    #
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-step loop
    # ---------------
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((-1,1))
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast
            Xn = selfA.StoredVariables["Analysis"][-1]
            Xn_predicted = M( Xn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
        #
        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
    #
    return 0
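# Sketch (illustrative): multi3dvar chains one of the elementary 3DVAR cycles
# defined below over the observation steps, e.g.:
#     multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle = std3dvar)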
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR (Bouttier 1999, Courtier 1993)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Workaround for a TNC bug on the Minimum return value
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Optional use of a precomputed H(Xb) vector
    # ------------------------------------------
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
    #
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precomputation of the inverses of B and R
    # -----------------------------------------
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    # ----------------------------------
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
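    # The standard 3DVAR cost function minimized below (sketch):
    #     J(x) = 1/2 (x-Xb)' B^{-1} (x-Xb) + 1/2 (Y-H(x))' R^{-1} (Y-H(x))
    # with gradient  grad J(x) = B^{-1} (x-Xb) - H'(x)^T R^{-1} (Y-H(x)).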
    def CostFunction(x):
        _X  = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
    def GradientOfCostFunction(x):
        _X      = numpy.asmatrix(numpy.ravel( x )).T
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BI * (_X - Xb)
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the Minimum return value
    # ----------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa.A1 )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
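        # Sketch of what was just computed: each loop turn builds one column
        # B^{-1} e_i + H^T R^{-1} H e_i of the Hessian of J at the optimum, so
        # that A = (B^{-1} + H^T R^{-1} H)^{-1} is taken as the a posteriori
        # covariance.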
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B (Huang 2000)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Workaround for a TNC bug on the Minimum return value
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Initializations
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precomputation of operators on B and R
    BI = B.getI()
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
    #
    # Definition of the cost function
    # -------------------------------
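    # Change of variable used below (sketch): the control variable is v, with
    # x = Xb + B v, so the background term reads
    #     Jb(v) = 1/2 v' B' v
    # and the minimization itself does not require inverting B.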
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BT * _V
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the Minimum return value
    # ----------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR (Courtier 1994, 1997)

    selfA is identical to the "self" of the calling algorithm and holds its
    values.
    """
    #
    # Workaround for a TNC bug on the Minimum return value
    if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
        selfA.setParameterValue("StoreInternalVariables",True)
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precomputation of the inverses of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop initialization
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Tangent operator linearized at the current outer iterate Xr
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
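        # Incremental formulation (sketch): each outer iteration minimizes a
        # cost that is quadratic in the increment dx around Xb, with H
        # linearized at the current iterate:
        #     J(dx) = 1/2 dx' B^{-1} dx + 1/2 (d - Ht dx)' R^{-1} (d - Ht dx)
        # where d = Y - H(Xb) is the innovation.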
        def CostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht @ _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
                if selfA._toStore("IndexOfOptimum"):
                    selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if selfA._toStore("CurrentOptimum"):
                    selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
                if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                    selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
                if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
                if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
                if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                    selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        def GradientOfCostFunction(dx):
            _dX = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX = Ht @ _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb = BI * _dX
            GradJo = - Ht.T @ (RI * _dInnovation)
            GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the functional
        # ------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
        J      = selfA.StoredVariables["CostFunctionJ" ][-1]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1499 # Additional computations and/or storage
1500 # ---------------------------------------
1501 if selfA._toStore("Innovation") or \
1502 selfA._toStore("SigmaObs2") or \
1503 selfA._toStore("MahalanobisConsistency") or \
1504 selfA._toStore("OMB"):
1505 d = Y - HXb
1506 if selfA._toStore("Innovation"):
1507 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1508 if selfA._toStore("BMA"):
1509 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1510 if selfA._toStore("OMA"):
1511 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1512 if selfA._toStore("OMB"):
1513 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1514 if selfA._toStore("SigmaObs2"):
1515 TraceR = R.trace(Y.size)
1516 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1517 if selfA._toStore("MahalanobisConsistency"):
1518 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1519 if selfA._toStore("SimulationQuantiles"):
1520 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1521 HXa = numpy.matrix(numpy.ravel( HXa )).T
1522 YfQ = None
1523 for i in range(nech):
1524 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1525 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1526 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1527 Yr = HXa + dYr
1528 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1529 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1530 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1531 if YfQ is None:
1532 YfQ = Yr
1533 else:
1534 YfQ = numpy.hstack((YfQ,Yr))
1535 YfQ.sort(axis=-1)
1536 YQ = None
1537 for quantile in selfA._parameters["Quantiles"]:
1538 if not (0. <= float(quantile) <= 1.): continue
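# The index below is the empirical position of the requested quantile among
# the nech simulated observations sorted column-wise in YfQ just above.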
1539 indice = int(nech * float(quantile) - 1./nech)
1540 if YQ is None: YQ = YfQ[:,indice]
1541 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1542 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1543 if selfA._toStore("SimulatedObservationAtBackground"):
1544 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1545 if selfA._toStore("SimulatedObservationAtOptimum"):
1546 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1548 return 0
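# Minimal self-contained sketch (illustrative only, not called anywhere in
# this module; the helper name and the dense-array assumptions are editorial
# examples) of the a posteriori covariance assembled above:
# A = (B^-1 + HtM^T R^-1 HtM)^-1, built basis-vector by basis-vector exactly
# like the HessienneI loop, with dense NumPy arrays for B, R and HtM.
def _sketchAPosterioriCovariance( HtM, B, R ):
    import numpy
    HtM = numpy.asarray(HtM)                     # Tangent operator, shape (p,n)
    nb  = HtM.shape[1]
    BI  = numpy.linalg.inv(numpy.asarray(B))     # B^-1
    RI  = numpy.linalg.inv(numpy.asarray(R))     # R^-1
    HaM = HtM.T                                  # Adjoint taken as the transpose
    HessienneI = []
    for i in range(nb):
        _ee = numpy.zeros((nb,1)) ; _ee[i] = 1.  # i-th basis vector
        HessienneI.append( numpy.ravel( BI @ _ee + HaM @ (RI @ (HtM @ _ee)) ) )
    # The assembled matrix is symmetric, so stacking rows or columns coincide
    return numpy.linalg.inv( numpy.array( HessienneI ) )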
1550 # ==============================================================================
1551 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1552 """
1553 3DVAR PSAS (Huang 2000)
1555 selfA is identical to the "self" of the calling algorithm and contains
1556 the values.
1557 """
1559 # Workaround for a TNC bug on the returned Minimum
1560 if "Minimizer" in selfA._parameters and selfA._parameters["Minimizer"] == "TNC":
1561 selfA.setParameterValue("StoreInternalVariables",True)
1567 Hm = HO["Direct"].appliedTo
1569 # Optional use of a precomputed H(Xb) vector
1570 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1571 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1572 else:
1573 HXb = Hm( Xb )
1574 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1575 if Y.size != HXb.size:
1576 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1577 if max(Y.shape) != max(HXb.shape):
1578 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1580 if selfA._toStore("JacobianMatrixAtBackground"):
1581 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1582 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1583 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1585 Ht = HO["Tangent"].asMatrix(Xb)
1586 BHT = B * Ht.T
1587 HBHTpR = R + Ht * BHT
1588 Innovation = Y - HXb
1590 # Starting point of the optimization
1591 Xini = numpy.zeros(Xb.shape)
1593 # Definition of the cost function
1594 # ------------------------------
1595 def CostFunction(w):
1596 _W = numpy.asmatrix(numpy.ravel( w )).T
1597 if selfA._parameters["StoreInternalVariables"] or \
1598 selfA._toStore("CurrentState") or \
1599 selfA._toStore("CurrentOptimum"):
1600 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1601 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1602 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1603 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1604 if selfA._toStore("InnovationAtCurrentState"):
1605 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1607 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1608 Jo = float( - _W.T * Innovation )
1609 J = Jb + Jo
1611 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1612 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1613 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1614 selfA.StoredVariables["CostFunctionJ" ].store( J )
1615 if selfA._toStore("IndexOfOptimum") or \
1616 selfA._toStore("CurrentOptimum") or \
1617 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1618 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1619 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1620 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1621 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1622 if selfA._toStore("IndexOfOptimum"):
1623 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1624 if selfA._toStore("CurrentOptimum"):
1625 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1626 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1627 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1628 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1629 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1630 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1631 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1632 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1633 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1634 return J
1636 def GradientOfCostFunction(w):
1637 _W = numpy.asmatrix(numpy.ravel( w )).T
1638 GradJb = HBHTpR * _W
1639 GradJo = - Innovation
1640 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1641 return GradJ
1643 # Minimization of the cost functional
1644 # -----------------------------------
1645 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1647 if selfA._parameters["Minimizer"] == "LBFGSB":
1648 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1649 if "0.19" <= scipy.version.version <= "1.1.0":
1650 import lbfgsbhlt as optimiseur
1651 else:
1652 import scipy.optimize as optimiseur
1653 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1654 func = CostFunction,
1655 x0 = Xini,
1656 fprime = GradientOfCostFunction,
1657 args = (),
1658 bounds = selfA._parameters["Bounds"],
1659 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1660 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1661 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1662 iprint = selfA._parameters["optiprint"],
1663 )
1664 nfeval = Informations['funcalls']
1665 rc = Informations['warnflag']
1666 elif selfA._parameters["Minimizer"] == "TNC":
1667 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1668 func = CostFunction,
1669 x0 = Xini,
1670 fprime = GradientOfCostFunction,
1671 args = (),
1672 bounds = selfA._parameters["Bounds"],
1673 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1674 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1675 ftol = selfA._parameters["CostDecrementTolerance"],
1676 messages = selfA._parameters["optmessages"],
1677 )
1678 elif selfA._parameters["Minimizer"] == "CG":
1679 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1680 f = CostFunction,
1681 x0 = Xini,
1682 fprime = GradientOfCostFunction,
1683 args = (),
1684 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1685 gtol = selfA._parameters["GradientNormTolerance"],
1686 disp = selfA._parameters["optdisp"],
1687 full_output = True,
1688 )
1689 elif selfA._parameters["Minimizer"] == "NCG":
1690 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1691 f = CostFunction,
1692 x0 = Xini,
1693 fprime = GradientOfCostFunction,
1694 args = (),
1695 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1696 avextol = selfA._parameters["CostDecrementTolerance"],
1697 disp = selfA._parameters["optdisp"],
1698 full_output = True,
1699 )
1700 elif selfA._parameters["Minimizer"] == "BFGS":
1701 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1702 f = CostFunction,
1703 x0 = Xini,
1704 fprime = GradientOfCostFunction,
1705 args = (),
1706 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1707 gtol = selfA._parameters["GradientNormTolerance"],
1708 disp = selfA._parameters["optdisp"],
1709 full_output = True,
1710 )
1711 else:
1712 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1714 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1715 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1717 # Workaround for a TNC bug on the returned Minimum
1718 # ----------------------------------------------------------------
1719 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1720 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1721 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1722 else:
1723 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1725 # Obtaining the analysis
1726 # ----------------------
1727 Xa = Minimum
1729 selfA.StoredVariables["Analysis"].store( Xa )
1731 if selfA._toStore("OMA") or \
1732 selfA._toStore("SigmaObs2") or \
1733 selfA._toStore("SimulationQuantiles") or \
1734 selfA._toStore("SimulatedObservationAtOptimum"):
1735 if selfA._toStore("SimulatedObservationAtCurrentState"):
1736 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1737 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1738 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1739 else:
1740 HXa = Hm( Xa )
1742 # Computing the analysis covariance
1743 # ---------------------------------
1744 if selfA._toStore("APosterioriCovariance") or \
1745 selfA._toStore("SimulationQuantiles") or \
1746 selfA._toStore("JacobianMatrixAtOptimum") or \
1747 selfA._toStore("KalmanGainAtOptimum"):
1748 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1749 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1750 if selfA._toStore("APosterioriCovariance") or \
1751 selfA._toStore("SimulationQuantiles") or \
1752 selfA._toStore("KalmanGainAtOptimum"):
1753 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1754 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1755 if selfA._toStore("APosterioriCovariance") or \
1756 selfA._toStore("SimulationQuantiles"):
1757 BI = B.getI()
1758 RI = R.getI()
1759 HessienneI = []
1760 nb = Xa.size
1761 for i in range(nb):
1762 _ee = numpy.matrix(numpy.zeros(nb)).T
1763 _ee[i] = 1.
1764 _HtEE = numpy.dot(HtM,_ee)
1765 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1766 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1767 HessienneI = numpy.matrix( HessienneI )
1768 A = HessienneI.I
1769 if min(A.shape) != max(A.shape):
1770 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1771 if (numpy.diag(A) < 0).any():
1772 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1773 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1774 try:
1775 L = numpy.linalg.cholesky( A )
1776 except:
1777 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1778 if selfA._toStore("APosterioriCovariance"):
1779 selfA.StoredVariables["APosterioriCovariance"].store( A )
1780 if selfA._toStore("JacobianMatrixAtOptimum"):
1781 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1782 if selfA._toStore("KalmanGainAtOptimum"):
1783 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1784 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1785 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1787 # Additional computations and/or storage
1788 # ---------------------------------------
1789 if selfA._toStore("Innovation") or \
1790 selfA._toStore("SigmaObs2") or \
1791 selfA._toStore("MahalanobisConsistency") or \
1792 selfA._toStore("OMB"):
1793 d = Y - HXb
1794 if selfA._toStore("Innovation"):
1795 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1796 if selfA._toStore("BMA"):
1797 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1798 if selfA._toStore("OMA"):
1799 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1800 if selfA._toStore("OMB"):
1801 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1802 if selfA._toStore("SigmaObs2"):
1803 TraceR = R.trace(Y.size)
1804 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1805 if selfA._toStore("MahalanobisConsistency"):
1806 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1807 if selfA._toStore("SimulationQuantiles"):
1808 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1809 HXa = numpy.matrix(numpy.ravel( HXa )).T
1810 YfQ = None
1811 for i in range(nech):
1812 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1813 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1814 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1815 Yr = HXa + dYr
1816 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1817 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1818 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1819 if YfQ is None:
1820 YfQ = Yr
1821 else:
1822 YfQ = numpy.hstack((YfQ,Yr))
1823 YfQ.sort(axis=-1)
1824 YQ = None
1825 for quantile in selfA._parameters["Quantiles"]:
1826 if not (0. <= float(quantile) <= 1.): continue
1827 indice = int(nech * float(quantile) - 1./nech)
1828 if YQ is None: YQ = YfQ[:,indice]
1829 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1830 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1831 if selfA._toStore("SimulatedObservationAtBackground"):
1832 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1833 if selfA._toStore("SimulatedObservationAtOptimum"):
1834 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1836 return 0
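# Minimal sketch (illustrative only, not used by psas3dvar; the helper name
# and the toy dense-array setting are editorial assumptions) of the PSAS dual
# minimization above: the control w lives in observation space, with
# J(w) = 0.5*w^T (H B H^T + R) w - w^T d, innovation d = Y - H Xb, and the
# analysis recovered as Xa = Xb + B H^T w.
def _sketchPSASDual( Xb, Y, H, B, R ):
    import numpy, scipy.optimize
    d      = Y - H @ Xb                  # Innovation, in observation space
    BHT    = B @ H.T
    HBHTpR = H @ BHT + R
    J      = lambda w: float( 0.5 * w @ (HBHTpR @ w) - w @ d )
    GradJ  = lambda w: HBHTpR @ w - d
    w = scipy.optimize.fmin_cg( J, numpy.zeros(Y.size), fprime=GradJ, disp=False )
    return Xb + BHT @ w                  # Analysis, back in state space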
1838 # ==============================================================================
1839 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
1840 """
1841 Stochastic EnKF (Evensen 1994, Burgers 1998)
1843 selfA is identical to the "self" of the calling algorithm and contains
1844 the values.
1845 """
1846 if selfA._parameters["EstimationOf"] == "Parameters":
1847 selfA._parameters["StoreInternalVariables"] = True
1851 H = HO["Direct"].appliedControledFormTo
1853 if selfA._parameters["EstimationOf"] == "State":
1854 M = EM["Direct"].appliedControledFormTo
1856 if CM is not None and "Tangent" in CM and U is not None:
1857 Cm = CM["Tangent"].asMatrix(Xb)
1858 else:
1859 Cm = None
1861 # Number of steps identical to the number of observation steps
1862 # -------------------------------------------------------
1863 if hasattr(Y,"stepnumber"):
1864 duration = Y.stepnumber()
1865 __p = numpy.cumprod(Y.shape())[-1]
1866 else:
1867 duration = 2
1868 __p = numpy.array(Y).size
1870 # Precomputation of the inverses of B and R
1871 # ----------------------------------
1872 if selfA._parameters["StoreInternalVariables"] \
1873 or selfA._toStore("CostFunctionJ") \
1874 or selfA._toStore("CostFunctionJb") \
1875 or selfA._toStore("CostFunctionJo") \
1876 or selfA._toStore("CurrentOptimum") \
1877 or selfA._toStore("APosterioriCovariance"):
1878 BI = B.getI()
1879 RI = R.getI()
1881 # Initialisation
1883 __n = Xb.size
1884 __m = selfA._parameters["NumberOfMembers"]
1885 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1886 else: Pn = B
1887 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1888 else: Rn = R
1889 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1890 else: Qn = Q
1891 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1893 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1894 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
1895 if selfA._toStore("APosterioriCovariance"):
1896 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1897 covarianceXaMin = Pn
1899 previousJMinimum = numpy.finfo(float).max
1901 for step in range(duration-1):
1902 if hasattr(Y,"store"):
1903 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
1904 else:
1905 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
1907 if U is not None:
1908 if hasattr(U,"store") and len(U)>1:
1909 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1910 elif hasattr(U,"store") and len(U)==1:
1911 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1912 else:
1913 Un = numpy.asmatrix(numpy.ravel( U )).T
1914 else:
1915 Un = None
1917 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1918 Xn = CovarianceInflation( Xn,
1919 selfA._parameters["InflationType"],
1920 selfA._parameters["InflationFactor"],
1921 )
1923 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1924 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1926 returnSerieAsArrayMatrix = True )
1927 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1928 Xn_predicted = EMX + qi
1929 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1931 returnSerieAsArrayMatrix = True )
1932 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
1933 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1934 Xn_predicted = Xn_predicted + Cm * Un
1935 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1936 # ---> By principle, M = Id, Q = 0
1937 Xn_predicted = Xn
1938 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1940 returnSerieAsArrayMatrix = True )
1942 # Mean of forecast and observation of forecast
1943 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1944 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1946 #--------------------------
1947 if VariantM == "KalmanFilterFormula05":
1948 PfHT, HPfHT = 0., 0.
1949 for i in range(__m):
1950 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
1951 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
1952 PfHT += Exfi * Eyfi.T
1953 HPfHT += Eyfi * Eyfi.T
1954 PfHT = (1./(__m-1)) * PfHT
1955 HPfHT = (1./(__m-1)) * HPfHT
1956 Kn = PfHT * ( R + HPfHT ).I
1959 for i in range(__m):
1960 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
1961 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
1962 #--------------------------
1963 elif VariantM == "KalmanFilterFormula16":
1964 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
1965 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
1967 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
1968 EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
1970 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
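# This expresses the ensemble gain K = Pf H^T (H Pf H^T + R)^-1, where Pf H^T
# is estimated by EaX @ EaY.T and H Pf H^T + R by EaY @ EaY.T, the observation
# perturbations EpY carrying the R contribution.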
1972 for i in range(__m):
1973 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
1974 #--------------------------
1975 else:
1976 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1978 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1979 Xn = CovarianceInflation( Xn,
1980 selfA._parameters["InflationType"],
1981 selfA._parameters["InflationFactor"],
1982 )
1984 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
1985 #--------------------------
1987 if selfA._parameters["StoreInternalVariables"] \
1988 or selfA._toStore("CostFunctionJ") \
1989 or selfA._toStore("CostFunctionJb") \
1990 or selfA._toStore("CostFunctionJo") \
1991 or selfA._toStore("APosterioriCovariance") \
1992 or selfA._toStore("InnovationAtCurrentAnalysis") \
1993 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1994 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1995 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1996 _Innovation = Ynpu - _HXa
1998 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1999 # ---> with analysis
2000 selfA.StoredVariables["Analysis"].store( Xa )
2001 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2002 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2003 if selfA._toStore("InnovationAtCurrentAnalysis"):
2004 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2005 # ---> with current state
2006 if selfA._parameters["StoreInternalVariables"] \
2007 or selfA._toStore("CurrentState"):
2008 selfA.StoredVariables["CurrentState"].store( Xn )
2009 if selfA._toStore("ForecastState"):
2010 selfA.StoredVariables["ForecastState"].store( EMX )
2011 if selfA._toStore("BMA"):
2012 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2013 if selfA._toStore("InnovationAtCurrentState"):
2014 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2015 if selfA._toStore("SimulatedObservationAtCurrentState") \
2016 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2017 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2019 if selfA._parameters["StoreInternalVariables"] \
2020 or selfA._toStore("CostFunctionJ") \
2021 or selfA._toStore("CostFunctionJb") \
2022 or selfA._toStore("CostFunctionJo") \
2023 or selfA._toStore("CurrentOptimum") \
2024 or selfA._toStore("APosterioriCovariance"):
2025 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2026 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2027 J = Jb + Jo
2028 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2029 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2030 selfA.StoredVariables["CostFunctionJ" ].store( J )
2032 if selfA._toStore("IndexOfOptimum") \
2033 or selfA._toStore("CurrentOptimum") \
2034 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2035 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2036 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2037 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2038 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2039 if selfA._toStore("IndexOfOptimum"):
2040 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2041 if selfA._toStore("CurrentOptimum"):
2042 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2043 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2044 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2045 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2046 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2047 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2048 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2049 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2050 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2051 if selfA._toStore("APosterioriCovariance"):
2052 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2053 Pn = Eai @ Eai.T
2054 Pn = 0.5 * (Pn + Pn.T)
2055 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2056 if selfA._parameters["EstimationOf"] == "Parameters" \
2057 and J < previousJMinimum:
2058 previousJMinimum = J
2059 XaMin = Xa
2060 if selfA._toStore("APosterioriCovariance"):
2061 covarianceXaMin = Pn
2063 # Additional final storage of the optimum for parameter estimation
2064 # ----------------------------------------------------------------------
2065 if selfA._parameters["EstimationOf"] == "Parameters":
2066 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2067 selfA.StoredVariables["Analysis"].store( XaMin )
2068 if selfA._toStore("APosterioriCovariance"):
2069 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2070 if selfA._toStore("BMA"):
2071 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2073 return 0
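# Minimal sketch (illustrative only, not used by senkf; the helper name and
# the assumption of a linear observation operator given as a dense matrix
# Hmat are editorial) of one stochastic EnKF analysis step as in the
# "KalmanFilterFormula05" branch above:
def _sketchStochasticEnKFStep( Xf, Yobs, Hmat, R ):
    import numpy
    __n, __m = Xf.shape
    Exf   = Xf - Xf.mean(axis=1, keepdims=True)       # Forecast anomalies
    Eyf   = Hmat @ Exf                                # Observed anomalies
    PfHT  = Exf @ Eyf.T / (__m-1)                     # Pf H^T estimate
    HPfHT = Eyf @ Eyf.T / (__m-1)                     # H Pf H^T estimate
    Kn = PfHT @ numpy.linalg.inv( HPfHT + R )         # Ensemble Kalman gain
    Xa = Xf.copy()
    for i in range(__m):                              # Perturbed-observation update
        ri = numpy.random.multivariate_normal(numpy.zeros(Yobs.size), R)
        Xa[:,i] += Kn @ (Yobs + ri - Hmat @ Xf[:,i])
    return Xa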
2075 # ==============================================================================
2076 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2077 """
2078 Ensemble-Transform EnKF (ETKF or Deterministic EnKF: Bishop 2001, Hunt 2007)
2080 selfA is identical to the "self" of the calling algorithm and contains
2081 the values.
2082 """
2083 if selfA._parameters["EstimationOf"] == "Parameters":
2084 selfA._parameters["StoreInternalVariables"] = True
2088 H = HO["Direct"].appliedControledFormTo
2090 if selfA._parameters["EstimationOf"] == "State":
2091 M = EM["Direct"].appliedControledFormTo
2093 if CM is not None and "Tangent" in CM and U is not None:
2094 Cm = CM["Tangent"].asMatrix(Xb)
2095 else:
2096 Cm = None
2098 # Number of steps identical to the number of observation steps
2099 # -------------------------------------------------------
2100 if hasattr(Y,"stepnumber"):
2101 duration = Y.stepnumber()
2102 __p = numpy.cumprod(Y.shape())[-1]
2103 else:
2104 duration = 2
2105 __p = numpy.array(Y).size
2107 # Precomputation of the inverses of B and R
2108 # ----------------------------------
2109 if selfA._parameters["StoreInternalVariables"] \
2110 or selfA._toStore("CostFunctionJ") \
2111 or selfA._toStore("CostFunctionJb") \
2112 or selfA._toStore("CostFunctionJo") \
2113 or selfA._toStore("CurrentOptimum") \
2114 or selfA._toStore("APosterioriCovariance"):
2115 BI = B.getI()
2116 RI = R.getI()
2117 elif VariantM != "KalmanFilterFormula":
2118 RI = R.getI()
2119 if VariantM == "KalmanFilterFormula":
2120 RIdemi = R.choleskyI()
2122 # Initialisation
2124 __n = Xb.size
2125 __m = selfA._parameters["NumberOfMembers"]
2126 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2127 else: Pn = B
2128 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2129 else: Rn = R
2130 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2131 else: Qn = Q
2132 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2134 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2135 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2136 if selfA._toStore("APosterioriCovariance"):
2137 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2138 covarianceXaMin = Pn
2140 previousJMinimum = numpy.finfo(float).max
2142 for step in range(duration-1):
2143 if hasattr(Y,"store"):
2144 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2145 else:
2146 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2148 if U is not None:
2149 if hasattr(U,"store") and len(U)>1:
2150 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2151 elif hasattr(U,"store") and len(U)==1:
2152 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2153 else:
2154 Un = numpy.asmatrix(numpy.ravel( U )).T
2155 else:
2156 Un = None
2158 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2159 Xn = CovarianceInflation( Xn,
2160 selfA._parameters["InflationType"],
2161 selfA._parameters["InflationFactor"],
2162 )
2164 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2165 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2167 returnSerieAsArrayMatrix = True )
2168 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2169 Xn_predicted = EMX + qi
2170 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2172 returnSerieAsArrayMatrix = True )
2173 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
2174 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2175 Xn_predicted = Xn_predicted + Cm * Un
2176 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2177 # ---> By principle, M = Id, Q = 0
2178 Xn_predicted = Xn
2179 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2181 returnSerieAsArrayMatrix = True )
2183 # Mean of forecast and observation of forecast
2184 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2185 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2188 EaX = EnsembleOfAnomalies( Xn_predicted )
2189 EaHX = numpy.array(HX_predicted - Hfm)
2191 #--------------------------
2192 if VariantM == "KalmanFilterFormula":
2193 mS = RIdemi * EaHX / numpy.sqrt(__m-1)
2194 delta = RIdemi * ( Ynpu - Hfm )
2195 mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2196 vw = mT @ mS.transpose() @ delta
2198 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2199 mU = numpy.identity(__m)
2201 EaX = EaX / numpy.sqrt(__m-1)
2202 Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2203 #--------------------------
2204 elif VariantM == "Variational":
2205 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2206 def CostFunction(w):
2207 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2208 _Jo = 0.5 * _A.T @ (RI * _A)
2209 _Jb = 0.5 * (__m-1) * w.T @ w
2210 _J = _Jo + _Jb
2211 return float(_J)
2212 def GradientOfCostFunction(w):
2213 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2214 _GradJo = - EaHX.T @ (RI * _A)
2215 _GradJb = (__m-1) * w.reshape((__m,1))
2216 _GradJ = _GradJo + _GradJb
2217 return numpy.ravel(_GradJ)
2218 vw = scipy.optimize.fmin_cg(
2219 f = CostFunction,
2220 x0 = numpy.zeros(__m),
2221 fprime = GradientOfCostFunction,
2222 args = (),
2223 disp = False,
2224 )
2226 Hto = EaHX.T @ (RI * EaHX)
2227 Htb = (__m-1) * numpy.eye(__m)
2228 Hta = Hto + Htb
2230 Pta = numpy.linalg.inv( Hta )
2231 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1.e-18
2233 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2234 #--------------------------
2235 elif VariantM == "FiniteSize11": # Boc2011 gauge
2236 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2237 def CostFunction(w):
2238 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2239 _Jo = 0.5 * _A.T @ (RI * _A)
2240 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2241 _J = _Jo + _Jb
2242 return float(_J)
2243 def GradientOfCostFunction(w):
2244 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2245 _GradJo = - EaHX.T @ (RI * _A)
2246 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2247 _GradJ = _GradJo + _GradJb
2248 return numpy.ravel(_GradJ)
2249 vw = scipy.optimize.fmin_cg(
2250 f = CostFunction,
2251 x0 = numpy.zeros(__m),
2252 fprime = GradientOfCostFunction,
2253 args = (),
2254 disp = False,
2255 )
2257 Hto = EaHX.T @ (RI * EaHX)
2258 Htb = __m * \
2259 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2260 / (1 + 1/__m + vw.T @ vw)**2
2261 Hta = Hto + Htb
2263 Pta = numpy.linalg.inv( Hta )
2264 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1.e-18
2266 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2267 #--------------------------
2268 elif VariantM == "FiniteSize15": # Boc2015 gauge
2269 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2270 def CostFunction(w):
2271 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2272 _Jo = 0.5 * _A.T @ (RI * _A)
2273 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2274 _J = _Jo + _Jb
2275 return float(_J)
2276 def GradientOfCostFunction(w):
2277 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2278 _GradJo = - EaHX.T @ (RI * _A)
2279 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2280 _GradJ = _GradJo + _GradJb
2281 return numpy.ravel(_GradJ)
2282 vw = scipy.optimize.fmin_cg(
2283 f = CostFunction,
2284 x0 = numpy.zeros(__m),
2285 fprime = GradientOfCostFunction,
2286 args = (),
2287 disp = False,
2288 )
2290 Hto = EaHX.T @ (RI * EaHX)
2291 Htb = (__m+1) * \
2292 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2293 / (1 + 1/__m + vw.T @ vw)**2
2294 Hta = Hto + Htb
2296 Pta = numpy.linalg.inv( Hta )
2297 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1.e-18
2299 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2300 #--------------------------
2301 elif VariantM == "FiniteSize16": # Boc2016 gauge
2302 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2303 def CostFunction(w):
2304 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2305 _Jo = 0.5 * _A.T @ (RI * _A)
2306 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2307 _J = _Jo + _Jb
2308 return float(_J)
2309 def GradientOfCostFunction(w):
2310 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2311 _GradJo = - EaHX.T @ (RI * _A)
2312 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2313 _GradJ = _GradJo + _GradJb
2314 return numpy.ravel(_GradJ)
2315 vw = scipy.optimize.fmin_cg(
2316 f = CostFunction,
2317 x0 = numpy.zeros(__m),
2318 fprime = GradientOfCostFunction,
2319 args = (),
2320 disp = False,
2321 )
2323 Hto = EaHX.T @ (RI * EaHX)
2324 Htb = ((__m+1) / (__m-1)) * \
2325 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2326 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2327 Hta = Hto + Htb
2329 Pta = numpy.linalg.inv( Hta )
2330 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1.e-18
2332 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2333 #--------------------------
2334 else:
2335 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2337 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2338 Xn = CovarianceInflation( Xn,
2339 selfA._parameters["InflationType"],
2340 selfA._parameters["InflationFactor"],
2341 )
2343 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2344 #--------------------------
2346 if selfA._parameters["StoreInternalVariables"] \
2347 or selfA._toStore("CostFunctionJ") \
2348 or selfA._toStore("CostFunctionJb") \
2349 or selfA._toStore("CostFunctionJo") \
2350 or selfA._toStore("APosterioriCovariance") \
2351 or selfA._toStore("InnovationAtCurrentAnalysis") \
2352 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2353 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2354 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2355 _Innovation = Ynpu - _HXa
2357 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2358 # ---> with analysis
2359 selfA.StoredVariables["Analysis"].store( Xa )
2360 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2361 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2362 if selfA._toStore("InnovationAtCurrentAnalysis"):
2363 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2364 # ---> with current state
2365 if selfA._parameters["StoreInternalVariables"] \
2366 or selfA._toStore("CurrentState"):
2367 selfA.StoredVariables["CurrentState"].store( Xn )
2368 if selfA._toStore("ForecastState"):
2369 selfA.StoredVariables["ForecastState"].store( EMX )
2370 if selfA._toStore("BMA"):
2371 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2372 if selfA._toStore("InnovationAtCurrentState"):
2373 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2374 if selfA._toStore("SimulatedObservationAtCurrentState") \
2375 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2376 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2378 if selfA._parameters["StoreInternalVariables"] \
2379 or selfA._toStore("CostFunctionJ") \
2380 or selfA._toStore("CostFunctionJb") \
2381 or selfA._toStore("CostFunctionJo") \
2382 or selfA._toStore("CurrentOptimum") \
2383 or selfA._toStore("APosterioriCovariance"):
2384 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2385 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2386 J = Jb + Jo
2387 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2388 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2389 selfA.StoredVariables["CostFunctionJ" ].store( J )
2391 if selfA._toStore("IndexOfOptimum") \
2392 or selfA._toStore("CurrentOptimum") \
2393 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2394 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2395 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2396 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2397 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2398 if selfA._toStore("IndexOfOptimum"):
2399 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2400 if selfA._toStore("CurrentOptimum"):
2401 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2402 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2403 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2404 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2405 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2406 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2407 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2408 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2409 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2410 if selfA._toStore("APosterioriCovariance"):
2411 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2412 Pn = Eai @ Eai.T
2413 Pn = 0.5 * (Pn + Pn.T)
2414 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2415 if selfA._parameters["EstimationOf"] == "Parameters" \
2416 and J < previousJMinimum:
2417 previousJMinimum = J
2418 XaMin = Xa
2419 if selfA._toStore("APosterioriCovariance"):
2420 covarianceXaMin = Pn
2422 # Additional final storage of the optimum for parameter estimation
2423 # ----------------------------------------------------------------------
2424 if selfA._parameters["EstimationOf"] == "Parameters":
2425 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2426 selfA.StoredVariables["Analysis"].store( XaMin )
2427 if selfA._toStore("APosterioriCovariance"):
2428 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2429 if selfA._toStore("BMA"):
2430 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2432 return 0
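# Minimal sketch (illustrative only, not used by etkf; the helper name and
# the dense linear observation matrix Hmat are editorial assumptions) of the
# deterministic ETKF transform of the "KalmanFilterFormula" branch above,
# with mU taken as the identity:
def _sketchETKFStep( Xf, Yobs, Hmat, R ):
    import numpy, scipy.linalg
    __n, __m = Xf.shape
    Xfm = Xf.mean(axis=1).reshape((__n,1))
    EaX = (Xf - Xfm) / numpy.sqrt(__m-1)                  # Normalized anomalies
    RIdemi = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(R)))
    mS    = RIdemi @ (Hmat @ EaX)                         # Scaled observed anomalies
    delta = RIdemi @ (Yobs.reshape((-1,1)) - Hmat @ Xfm)  # Scaled innovation
    mT    = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
    vw    = mT @ mS.T @ delta                             # Mean-update weights
    Tdemi = numpy.real(scipy.linalg.sqrtm(mT))            # Symmetric square root
    return Xfm + EaX @ ( vw + numpy.sqrt(__m-1) * Tdemi ) # Analysis ensemble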
2434 # ==============================================================================
2435 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2436 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2437 """
2438 Maximum Likelihood Ensemble Filter (EnKF/MLEF Zupanski 2005, Bocquet 2013)
2440 selfA is identical to the "self" of the calling algorithm and contains
2441 the values.
2442 """
2443 if selfA._parameters["EstimationOf"] == "Parameters":
2444 selfA._parameters["StoreInternalVariables"] = True
2448 H = HO["Direct"].appliedControledFormTo
2450 if selfA._parameters["EstimationOf"] == "State":
2451 M = EM["Direct"].appliedControledFormTo
2453 if CM is not None and "Tangent" in CM and U is not None:
2454 Cm = CM["Tangent"].asMatrix(Xb)
2455 else:
2456 Cm = None
2458 # Number of steps identical to the number of observation steps
2459 # -------------------------------------------------------
2460 if hasattr(Y,"stepnumber"):
2461 duration = Y.stepnumber()
2462 __p = numpy.cumprod(Y.shape())[-1]
2463 else:
2464 duration = 2
2465 __p = numpy.array(Y).size
2467 # Precomputation of the inverses of B and R
2468 # ----------------------------------
2469 if selfA._parameters["StoreInternalVariables"] \
2470 or selfA._toStore("CostFunctionJ") \
2471 or selfA._toStore("CostFunctionJb") \
2472 or selfA._toStore("CostFunctionJo") \
2473 or selfA._toStore("CurrentOptimum") \
2474 or selfA._toStore("APosterioriCovariance"):
2475 BI = B.getI()
2476 RI = R.getI()
2478 # Initialisation
2480 __n = Xb.size
2481 __m = selfA._parameters["NumberOfMembers"]
2482 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2483 else: Pn = B
2484 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2485 else: Rn = R
2486 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2487 else: Qn = Q
2488 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2490 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2491 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2492 if selfA._toStore("APosterioriCovariance"):
2493 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2494 covarianceXaMin = Pn
2496 previousJMinimum = numpy.finfo(float).max
2498 for step in range(duration-1):
2499 if hasattr(Y,"store"):
2500 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2501 else:
2502 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2504 if U is not None:
2505 if hasattr(U,"store") and len(U)>1:
2506 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2507 elif hasattr(U,"store") and len(U)==1:
2508 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2509 else:
2510 Un = numpy.asmatrix(numpy.ravel( U )).T
2511 else:
2512 Un = None
2514 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2515 Xn = CovarianceInflation( Xn,
2516 selfA._parameters["InflationType"],
2517 selfA._parameters["InflationFactor"],
2518 )
2520 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2521 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2523 returnSerieAsArrayMatrix = True )
2524 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2525 Xn_predicted = EMX + qi
2526 if Cm is not None and Un is not None: # Beware: if Cm is also embedded in M, it is applied twice!
2527 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2528 Xn_predicted = Xn_predicted + Cm * Un
2529 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2530 # ---> By principle, M = Id, Q = 0
2531 Xn_predicted = Xn
2533 #--------------------------
2534 if VariantM == "MLEF13":
2535 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2536 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2537 Ua = numpy.identity(__m)
2538 __j = 0
2539 Deltaw = 1
2540 if not BnotT:
2541 Ta = numpy.identity(__m)
2542 vw = numpy.zeros(__m)
2543 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2544 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2546 if BnotT:
2547 E1 = vx1 + _epsilon * EaX
2548 else:
2549 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2551 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2553 returnSerieAsArrayMatrix = True )
2554 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2556 if BnotT:
2557 EaY = (HE2 - vy2) / _epsilon
2558 else:
2559 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2561 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2562 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2563 Deltaw = - numpy.linalg.solve(mH,GradJ)
2564 vw = vw + Deltaw
2565 __j = __j + 1
2567 if not BnotT:
2568 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2572 if BnotT:
2573 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2575 Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2576 #--------------------------
2577 else:
2578 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2580 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2581 Xn = CovarianceInflation( Xn,
2582 selfA._parameters["InflationType"],
2583 selfA._parameters["InflationFactor"],
2584 )
2586 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2587 #--------------------------
2589 if selfA._parameters["StoreInternalVariables"] \
2590 or selfA._toStore("CostFunctionJ") \
2591 or selfA._toStore("CostFunctionJb") \
2592 or selfA._toStore("CostFunctionJo") \
2593 or selfA._toStore("APosterioriCovariance") \
2594 or selfA._toStore("InnovationAtCurrentAnalysis") \
2595 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2596 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2598 _Innovation = Ynpu - _HXa
2600 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2601 # ---> with analysis
2602 selfA.StoredVariables["Analysis"].store( Xa )
2603 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2604 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2605 if selfA._toStore("InnovationAtCurrentAnalysis"):
2606 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2607 # ---> with current state
2608 if selfA._parameters["StoreInternalVariables"] \
2609 or selfA._toStore("CurrentState"):
2610 selfA.StoredVariables["CurrentState"].store( Xn )
2611 if selfA._toStore("ForecastState"):
2612 selfA.StoredVariables["ForecastState"].store( EMX )
2613 if selfA._toStore("BMA"):
2614 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2615 if selfA._toStore("InnovationAtCurrentState"):
2616 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2617 if selfA._toStore("SimulatedObservationAtCurrentState") \
2618 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2619 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2621 if selfA._parameters["StoreInternalVariables"] \
2622 or selfA._toStore("CostFunctionJ") \
2623 or selfA._toStore("CostFunctionJb") \
2624 or selfA._toStore("CostFunctionJo") \
2625 or selfA._toStore("CurrentOptimum") \
2626 or selfA._toStore("APosterioriCovariance"):
2627 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2628 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2629 J = Jb + Jo
2630 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2631 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2632 selfA.StoredVariables["CostFunctionJ" ].store( J )
2634 if selfA._toStore("IndexOfOptimum") \
2635 or selfA._toStore("CurrentOptimum") \
2636 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2637 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2638 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2639 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2640 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2641 if selfA._toStore("IndexOfOptimum"):
2642 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2643 if selfA._toStore("CurrentOptimum"):
2644 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2645 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2646 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2647 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2648 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2649 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2650 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2651 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2652 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2653 if selfA._toStore("APosterioriCovariance"):
2654 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2655 Pn = Eai @ Eai.T
2656 Pn = 0.5 * (Pn + Pn.T)
2657 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2658 if selfA._parameters["EstimationOf"] == "Parameters" \
2659 and J < previousJMinimum:
2660 previousJMinimum = J
2661 XaMin = Xa
2662 if selfA._toStore("APosterioriCovariance"):
2663 covarianceXaMin = Pn
2665 # Additional final storage of the optimum for parameter estimation
2666 # ----------------------------------------------------------------------
2667 if selfA._parameters["EstimationOf"] == "Parameters":
2668 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2669 selfA.StoredVariables["Analysis"].store( XaMin )
2670 if selfA._toStore("APosterioriCovariance"):
2671 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2672 if selfA._toStore("BMA"):
2673 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2675 return 0
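# Minimal sketch (illustrative only, not used by mlef; the helper name, the
# column-wise observation operator Hfun and the dense R^-1 given as RI are
# editorial assumptions) of the ensemble-space Gauss-Newton iteration of the
# MLEF13 variant above, in the transform case (BnotT=False, Ua = identity):
def _sketchMLEFAnalysis( Xf, Yobs, Hfun, RI, _e=1.e-7, _jmax=100 ):
    import numpy, scipy.linalg
    __n, __m = Xf.shape
    Xfm = Xf.mean(axis=1).reshape((__n,1))
    EaX = (Xf - Xfm) / numpy.sqrt(__m-1)
    vw  = numpy.zeros((__m,1)) ; Ta = numpy.identity(__m)
    Deltaw = numpy.ones((__m,1)) ; __j = 0
    while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
        vx1 = Xfm + EaX @ vw                            # Current mean iterate
        E1  = vx1 + numpy.sqrt(__m-1) * EaX @ Ta        # Re-generated ensemble
        HE  = Hfun(E1)
        vy  = HE.mean(axis=1).reshape((-1,1))
        EaY = ( (HE - vy) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
        GradJ  = vw - EaY.T @ (RI @ (Yobs.reshape((-1,1)) - vy))
        mH     = numpy.identity(__m) + EaY.T @ (RI @ EaY)
        Deltaw = - numpy.linalg.solve(mH, GradJ)        # Gauss-Newton step
        vw = vw + Deltaw
        Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(mH)))
        __j += 1
    return vx1 + numpy.sqrt(__m-1) * EaX @ Ta           # Analysis ensemble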
2677 # ==============================================================================
2678 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2679 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2680 """
2681 Iterative EnKF (Sakov 2012, Sakov 2018)
2683 selfA is identical to the "self" of the calling algorithm and contains
2684 the values.
2685 """
2686 if selfA._parameters["EstimationOf"] == "Parameters":
2687 selfA._parameters["StoreInternalVariables"] = True
2691 H = HO["Direct"].appliedControledFormTo
2693 if selfA._parameters["EstimationOf"] == "State":
2694 M = EM["Direct"].appliedControledFormTo
2696 if CM is not None and "Tangent" in CM and U is not None:
2697 Cm = CM["Tangent"].asMatrix(Xb)
2698 else:
2699 Cm = None
2701 # Number of steps identical to the number of observation steps
2702 # -------------------------------------------------------
2703 if hasattr(Y,"stepnumber"):
2704 duration = Y.stepnumber()
2705 __p = numpy.cumprod(Y.shape())[-1]
2706 else:
2707 duration = 2
2708 __p = numpy.array(Y).size
2710 # Precomputation of the inverses of B and R
2711 # ----------------------------------
2712 if selfA._parameters["StoreInternalVariables"] \
2713 or selfA._toStore("CostFunctionJ") \
2714 or selfA._toStore("CostFunctionJb") \
2715 or selfA._toStore("CostFunctionJo") \
2716 or selfA._toStore("CurrentOptimum") \
2717 or selfA._toStore("APosterioriCovariance"):
2718 BI = B.getI()
2719 RI = R.getI()
2721 # Initialisation
2723 __n = Xb.size
2724 __m = selfA._parameters["NumberOfMembers"]
2725 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2726 else: Pn = B
2727 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2728 else: Rn = R
2729 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2730 else: Qn = Q
2731 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2733 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2734 selfA.StoredVariables["Analysis"].store( numpy.ravel(Xb) )
2735 if selfA._toStore("APosterioriCovariance"):
2736 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2737 covarianceXaMin = Pn
2739 previousJMinimum = numpy.finfo(float).max
2741 for step in range(duration-1):
2742 if hasattr(Y,"store"):
2743 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2744 else:
2745 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2747 if U is not None:
2748 if hasattr(U,"store") and len(U)>1:
2749 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2750 elif hasattr(U,"store") and len(U)==1:
2751 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2752 else:
2753 Un = numpy.asmatrix(numpy.ravel( U )).T
2754 else:
2755 Un = None
2757 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2758 Xn = CovarianceInflation( Xn,
2759 selfA._parameters["InflationType"],
2760 selfA._parameters["InflationFactor"],
2761 )
2763 #--------------------------
2764 if VariantM == "IEnKF12":
2765 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2766 EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2767 __j = 0
2768 Deltaw = 1
2769 if not BnotT:
2770 Ta = numpy.identity(__m)
2771 vw = numpy.zeros(__m)
2772 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2773 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2775 if BnotT:
2776 E1 = vx1 + _epsilon * EaX
2777 else:
2778 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2780 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2781 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2783 returnSerieAsArrayMatrix = True )
2784 elif selfA._parameters["EstimationOf"] == "Parameters":
2785 # ---> By principle, M = Id
2786 E2 = E1
2787 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2788 vy1 = H((vx2, Un)).reshape((__p,-1))
2790 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2792 returnSerieAsArrayMatrix = True )
2793 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2795 if BnotT:
2796 EaY = (HE2 - vy2) / _epsilon
2797 else:
2798 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2800 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2801 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2802 Deltaw = - numpy.linalg.solve(mH,GradJ)
2803 vw = vw + Deltaw
2804 __j = __j + 1
2806 if not BnotT:
2807 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2811 A2 = EnsembleOfAnomalies( E2 )
2813 if BnotT:
2814 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2815 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
2817 Xn = vx2 + A2
2818 #--------------------------
2819 else:
2820 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2822 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2823 Xn = CovarianceInflation( Xn,
2824 selfA._parameters["InflationType"],
2825 selfA._parameters["InflationFactor"],
2826 )
2828 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2829 #--------------------------
2831 if selfA._parameters["StoreInternalVariables"] \
2832 or selfA._toStore("CostFunctionJ") \
2833 or selfA._toStore("CostFunctionJb") \
2834 or selfA._toStore("CostFunctionJo") \
2835 or selfA._toStore("APosterioriCovariance") \
2836 or selfA._toStore("InnovationAtCurrentAnalysis") \
2837 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2838 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2839 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2840 _Innovation = Ynpu - _HXa
2842 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2843 # ---> with analysis
2844 selfA.StoredVariables["Analysis"].store( Xa )
2845 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2846 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2847 if selfA._toStore("InnovationAtCurrentAnalysis"):
2848 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2849 # ---> with current state
2850 if selfA._parameters["StoreInternalVariables"] \
2851 or selfA._toStore("CurrentState"):
2852 selfA.StoredVariables["CurrentState"].store( Xn )
2853 if selfA._toStore("ForecastState"):
2854 selfA.StoredVariables["ForecastState"].store( E2 )
2855 if selfA._toStore("BMA"):
2856 selfA.StoredVariables["BMA"].store( E2 - Xa )
2857 if selfA._toStore("InnovationAtCurrentState"):
2858 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2859 if selfA._toStore("SimulatedObservationAtCurrentState") \
2860 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2861 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2863 if selfA._parameters["StoreInternalVariables"] \
2864 or selfA._toStore("CostFunctionJ") \
2865 or selfA._toStore("CostFunctionJb") \
2866 or selfA._toStore("CostFunctionJo") \
2867 or selfA._toStore("CurrentOptimum") \
2868 or selfA._toStore("APosterioriCovariance"):
2869 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2870 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2871 J = Jb + Jo
2872 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2873 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2874 selfA.StoredVariables["CostFunctionJ" ].store( J )
2876 if selfA._toStore("IndexOfOptimum") \
2877 or selfA._toStore("CurrentOptimum") \
2878 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2879 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2880 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2881 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2882 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2883 if selfA._toStore("IndexOfOptimum"):
2884 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2885 if selfA._toStore("CurrentOptimum"):
2886 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2887 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2888 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2889 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2890 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2891 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2892 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2893 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2894 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2895 if selfA._toStore("APosterioriCovariance"):
2896 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2897 Pn = Eai @ Eai.T
2898 Pn = 0.5 * (Pn + Pn.T)
2899 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2900 if selfA._parameters["EstimationOf"] == "Parameters" \
2901 and J < previousJMinimum:
2902 previousJMinimum = J
2903 XaMin = Xa
2904 if selfA._toStore("APosterioriCovariance"):
2905 covarianceXaMin = Pn
2907 # Additional final storage of the optimum for parameter estimation
2908 # ----------------------------------------------------------------------
2909 if selfA._parameters["EstimationOf"] == "Parameters":
2910 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2911 selfA.StoredVariables["Analysis"].store( XaMin )
2912 if selfA._toStore("APosterioriCovariance"):
2913 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2914 if selfA._toStore("BMA"):
2915 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2917 return 0
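# Minimal sketch (illustrative only; the helper name is an editorial
# assumption) of the multiplicative inflation on anomalies applied by the
# CovarianceInflation calls used throughout the ensemble filters above when
# one of the "MultiplicativeOn...Anomalies" types is selected (the real
# helper also handles the other InflationType values):
def _sketchMultiplicativeInflation( Xn, InflationFactor ):
    import numpy
    Xm = Xn.mean(axis=1, keepdims=True)        # Ensemble mean
    return Xm + InflationFactor * (Xn - Xm)    # Mean kept, anomalies rescaled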
2919 # ==============================================================================
2920 if __name__ == "__main__":
2921 print('\n AUTODIAGNOSTIC\n')