1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
Defines generic numerical objects.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
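# Added note: ExecuteFunction is the picklable worker used below by multiprocessing.Pool.map
# in TangentMatrix. The "triplet" argument is (X, xArgs, funcrepr), where funcrepr describes
# how to re-import the user function inside each worker process. A minimal sketch of a
# compatible funcrepr dictionary (the values shown are illustrative only):
#   funcrepr = {
#       "__userFunction__path" : "/path/to/the/module/directory",
#       "__userFunction__modl" : "my_module",
#       "__userFunction__name" : "my_function",
#   }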
51 # ==============================================================================
52 class FDApproximation(object):
This class provides an interface to define approximated operators. When an
object is created by supplying a function "Function", one obtains an object
with three methods: "DirectOperator", "TangentOperator" and "AdjointOperator".
The finite-difference (FD) approximation is controlled either by the
multiplicative increment "increment", equal to 1% by default, or by the fixed
increment "dX" which is then multiplied by "increment" (hence expressed in %),
and centered finite differences are used if the boolean "centeredDF" is true.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
Computation of the direct operator using the supplied function.

NB: the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _X = numpy.asmatrix(numpy.ravel( X )).T
180 _HX = numpy.ravel(self.__userFunction( _X ))
184 # ---------------------------------------------------------
185 def TangentMatrix(self, X ):
Computation of the tangent operator as the Jacobian by finite differences,
i.e. the gradient of H at X. Directional finite differences around the
point X are used. X is a numpy.matrix.

Centered finite differences (2nd-order approximation):
1/ For each component i of X, the perturbation dX[i] is added to and
subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
H( X_moins_dXi ) are computed
2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
(2*dX[i])
3/ Each result, component by component, becomes a column of the Jacobian

Non-centered finite differences (1st-order approximation):
1/ For each component i of X, the perturbation dX[i] is added to the
component X[i] to build X_plus_dXi, and the response
HX_plus_dXi = H( X_plus_dXi ) is computed
2/ The central value HX = H(X) is computed
3/ The differences (HX_plus_dXi-HX) are formed and divided by dX[i]
4/ Each result, component by component, becomes a column of the Jacobian
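# Added note, summarizing the finite-difference scheme implemented below: for each
# component i, with e_i the i-th canonical basis vector and dX[i] the increment,
#   centered     : J[:,i] ~= ( H(X + dX[i] e_i) - H(X - dX[i] e_i) ) / (2 * dX[i])
#   non-centered : J[:,i] ~= ( H(X + dX[i] e_i) - H(X) ) / dX[i]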
210 logging.debug("FDA Début du calcul de la Jacobienne")
211 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
212 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
214 if X is None or len(X)==0:
215 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
217 _X = numpy.asmatrix(numpy.ravel( X )).T
219 if self.__dX is None:
220 _dX = self.__increment * _X
222 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
224 if (_dX == 0.).any():
227 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229 _dX = numpy.where( _dX == 0., moyenne, _dX )
231 __alreadyCalculated = False
233 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
234 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236 __alreadyCalculated, __i = True, __alreadyCalculatedP
237 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
239 if __alreadyCalculated:
240 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
241 _Jacobienne = self.__listJPCR[__i]
243 logging.debug("FDA Calcul Jacobienne (explicite)")
244 if self.__centeredDF:
246 if self.__mpEnabled and not self.__mfEnabled:
248 "__userFunction__path" : self.__userFunction__path,
249 "__userFunction__modl" : self.__userFunction__modl,
250 "__userFunction__name" : self.__userFunction__name,
253 for i in range( len(_dX) ):
255 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
256 _X_plus_dXi[i] = _X[i] + _dXi
257 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
258 _X_moins_dXi[i] = _X[i] - _dXi
260 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
261 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263 import multiprocessing
264 self.__pool = multiprocessing.Pool(self.__mpWorkers)
265 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
270 for i in range( len(_dX) ):
271 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273 elif self.__mfEnabled:
275 for i in range( len(_dX) ):
277 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
278 _X_plus_dXi[i] = _X[i] + _dXi
279 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
280 _X_moins_dXi[i] = _X[i] - _dXi
282 _xserie.append( _X_plus_dXi )
283 _xserie.append( _X_moins_dXi )
285 _HX_plusmoins_dX = self.DirectOperator( _xserie )
288 for i in range( len(_dX) ):
289 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
293 for i in range( _dX.size ):
295 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
296 _X_plus_dXi[i] = _X[i] + _dXi
297 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
298 _X_moins_dXi[i] = _X[i] - _dXi
300 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
301 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
303 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
307 if self.__mpEnabled and not self.__mfEnabled:
309 "__userFunction__path" : self.__userFunction__path,
310 "__userFunction__modl" : self.__userFunction__modl,
311 "__userFunction__name" : self.__userFunction__name,
314 _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315 for i in range( len(_dX) ):
316 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
317 _X_plus_dXi[i] = _X[i] + _dX[i]
319 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321 import multiprocessing
322 self.__pool = multiprocessing.Pool(self.__mpWorkers)
323 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
327 _HX = _HX_plus_dX.pop(0)
330 for i in range( len(_dX) ):
331 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333 elif self.__mfEnabled:
335 _xserie.append( _X.A1 )
336 for i in range( len(_dX) ):
337 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
338 _X_plus_dXi[i] = _X[i] + _dX[i]
340 _xserie.append( _X_plus_dXi )
342 _HX_plus_dX = self.DirectOperator( _xserie )
344 _HX = _HX_plus_dX.pop(0)
347 for i in range( len(_dX) ):
348 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
352 _HX = self.DirectOperator( _X )
353 for i in range( _dX.size ):
355 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
356 _X_plus_dXi[i] = _X[i] + _dXi
358 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
363 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
365 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366 while len(self.__listJPCP) > self.__lenghtRJ:
367 self.__listJPCP.pop(0)
368 self.__listJPCI.pop(0)
369 self.__listJPCR.pop(0)
370 self.__listJPPN.pop(0)
371 self.__listJPIN.pop(0)
372 self.__listJPCP.append( copy.copy(_X) )
373 self.__listJPCI.append( copy.copy(_dX) )
374 self.__listJPCR.append( copy.copy(_Jacobienne) )
375 self.__listJPPN.append( numpy.linalg.norm(_X) )
self.__listJPIN.append( numpy.linalg.norm(_dX) )
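# Added note on the redundancy cache: the point X, the increment dX, the resulting Jacobian
# and the corresponding norms are stored in the self.__listJP* lists (bounded in length by
# self.__lenghtRJ), so that a later call with the same (X, dX) pair, up to the relative
# tolerance self.__tolerBP checked in __doublon__, reuses the stored Jacobian instead of
# recomputing it.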
378 logging.debug("FDA Fin du calcul de la Jacobienne")
382 # ---------------------------------------------------------
383 def TangentOperator(self, paire, **extraArgs ):
Computation of the tangent operator using the Jacobian.

NB: the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
assert len(paire) == 1, "Incorrect length of arguments"
393 assert len(_paire) == 2, "Incorrect number of arguments"
395 assert len(paire) == 2, "Incorrect number of arguments"
398 _Jacobienne = self.TangentMatrix( X )
399 if dX is None or len(dX) == 0:
# Return the matrix form when the second argument is None
# --------------------------------------------------------
403 if self.__mfEnabled: return [_Jacobienne,]
404 else: return _Jacobienne
# Computation of the linearized value of H at X applied to dX
# ------------------------------------------------------------
409 _dX = numpy.asmatrix(numpy.ravel( dX )).T
410 _HtX = numpy.dot(_Jacobienne, _dX)
411 if self.__mfEnabled: return [_HtX.A1,]
414 # ---------------------------------------------------------
415 def AdjointOperator(self, paire, **extraArgs ):
Computation of the adjoint operator using the Jacobian.

NB: the extraArgs are there to ensure call compatibility, but they must not
be passed to the user function here.
assert len(paire) == 1, "Incorrect length of arguments"
425 assert len(_paire) == 2, "Incorrect number of arguments"
427 assert len(paire) == 2, "Incorrect number of arguments"
430 _JacobienneT = self.TangentMatrix( X ).T
431 if Y is None or len(Y) == 0:
# Return the matrix form when the second argument is None
# --------------------------------------------------------
435 if self.__mfEnabled: return [_JacobienneT,]
436 else: return _JacobienneT
# Computation of the adjoint value at X applied to Y
# ---------------------------------------------------
441 _Y = numpy.asmatrix(numpy.ravel( Y )).T
442 _HaY = numpy.dot(_JacobienneT, _Y)
443 if self.__mfEnabled: return [_HaY.A1,]
446 # ==============================================================================
Computer implementation of the MMQR algorithm, based on the publication:
459 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
460 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
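# Added note: mmqr minimizes the quantile-regression cost sum_i rho_q( y_i - f(x)_i ), with
# rho_q(r) = r * (q - 1_{r<0}), by Majorization-Minimization: at each iteration the
# non-smooth cost is replaced by a smooth quadratic surrogate built from the weights
# "poids" = 1/(epsilon + |residus|), following Hunter & Lange (2000).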
# Retrieval of the initial data and information
# ----------------------------------------------
465 variables = numpy.ravel( x0 )
466 mesures = numpy.ravel( y )
467 increment = sys.float_info[0]
470 quantile = float(quantile)
# Computation of the MM parameters
# --------------------------------
474 tn = float(toler) / n
475 e0 = -tn / math.log(tn)
476 epsilon = (e0-tn)/(1+math.log(e0))
# Initialization computations
# ---------------------------
480 residus = mesures - numpy.ravel( func( variables ) )
481 poids = 1./(epsilon+numpy.abs(residus))
482 veps = 1. - 2. * quantile - residus * poids
483 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
# Iterative search
# ----------------
488 while (increment > toler) and (iteration < maxfun) :
491 Derivees = numpy.array(fprime(variables))
Derivees = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
493 DeriveesT = Derivees.transpose()
494 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
495 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
496 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
498 variables = variables + step
499 if bounds is not None:
# Caution: avoid an infinite loop if an interval is too small
501 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
503 variables = variables - step
504 residus = mesures - numpy.ravel( func(variables) )
505 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
507 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
509 variables = variables - step
510 residus = mesures - numpy.ravel( func(variables) )
511 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
513 increment = lastsurrogate-surrogate
514 poids = 1./(epsilon+numpy.abs(residus))
515 veps = 1. - 2. * quantile - residus * poids
516 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
520 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
522 return variables, Ecart, [n,p,iteration,increment,0]
524 # ==============================================================================
525 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
526 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
528 _bgcenter = numpy.ravel(_bgcenter)[:,None]
530 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
532 if _bgcovariance is None:
533 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
535 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
536 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
538 return BackgroundEnsemble
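# Usage sketch (added, illustrative only): an ensemble of 10 members drawn around xb with
# covariance B, returned as a matrix of shape (xb.size, 10), one member per column:
#   E = EnsembleOfCenteredPerturbations( xb, B, 10 )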
540 # ==============================================================================
541 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
542 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
543 def __CenteredRandomAnomalies(Zr, N):
Generates a matrix of N random anomalies centered on Zr, following the
handwritten notes of MB and consistent with the code of PS with eps = -1
549 Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
550 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
551 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
556 _bgcenter = numpy.ravel(_bgcenter)[:,None]
558 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
559 if _bgcovariance is None:
560 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
563 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
564 _nbctl = _bgcenter.size
565 if _nbmembers > _nbctl:
566 _Z = numpy.concatenate((numpy.dot(
567 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
568 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
570 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
571 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
572 BackgroundEnsemble = _bgcenter + _Zca
574 if max(abs(_bgcovariance.flatten())) > 0:
575 _nbctl = _bgcenter.size
576 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
577 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
578 BackgroundEnsemble = _bgcenter + _Zca
580 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
582 return BackgroundEnsemble
584 # ==============================================================================
585 def EnsembleOfAnomalies( _ensemble, _optmean = None):
586 "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
588 Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
590 Em = numpy.ravel(_optmean)[:,numpy.newaxis]
592 return numpy.asarray(_ensemble) - Em
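# Added note: EnsembleOfAnomalies subtracts, column by column, the ensemble mean (or the
# user-supplied mean _optmean) from the ensemble matrix, i.e. it returns the deviation of
# each member from the mean.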
594 # ==============================================================================
595 def CovarianceInflation(
597 InflationType = None,
598 InflationFactor = None,
599 BackgroundCov = None,
Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

Summary: Hunt 2007, section 2.3.5
606 if InflationFactor is None:
609 InflationFactor = float(InflationFactor)
611 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
612 if InflationFactor < 1.:
613 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
614 if InflationFactor < 1.+mpr:
616 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
618 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
619 if InflationFactor < 1.:
620 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
621 if InflationFactor < 1.+mpr:
623 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
624 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
625 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
627 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
628 if InflationFactor < 0.:
629 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
630 if InflationFactor < mpr:
632 __n, __m = numpy.asarray(InputCovOrEns).shape
634 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
635 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
637 elif InflationType == "HybridOnBackgroundCovariance":
638 if InflationFactor < 0.:
639 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
640 if InflationFactor < mpr:
642 __n, __m = numpy.asarray(InputCovOrEns).shape
644 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
645 if BackgroundCov is None:
646 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
647 if InputCovOrEns.shape != BackgroundCov.shape:
648 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
649 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
651 elif InflationType == "Relaxation":
652 raise NotImplementedError("InflationType Relaxation")
655 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
657 return OutputCovOrEns
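# Added summary of the inflation formulas implemented above (notation: f = InflationFactor,
# P = input covariance, X = input ensemble, B = background covariance):
#   multiplicative on covariances : P <- f^2 * P                       (f >= 1)
#   multiplicative on anomalies   : X <- mean(X) + f * (X - mean(X))   (f >= 1)
#   additive on covariances       : P <- (1-f) * P + f * I             (f >= 0)
#   hybrid on background cov.     : P <- (1-f) * P + f * B             (f >= 0)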
659 # ==============================================================================
660 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
Multi-step and multi-method 3DVAR
667 Xn = numpy.ravel(Xb).reshape((-1,1))
669 if selfA._parameters["EstimationOf"] == "State":
670 M = EM["Direct"].appliedTo
672 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
673 selfA.StoredVariables["Analysis"].store( Xn )
674 if selfA._toStore("APosterioriCovariance"):
675 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
677 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
678 if selfA._toStore("ForecastState"):
679 selfA.StoredVariables["ForecastState"].store( Xn )
681 if hasattr(Y,"stepnumber"):
682 duration = Y.stepnumber()
688 for step in range(duration-1):
689 if hasattr(Y,"store"):
690 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
692 Ynpu = numpy.ravel( Y ).reshape((-1,1))
694 if selfA._parameters["EstimationOf"] == "State": # Forecast
695 Xn = selfA.StoredVariables["Analysis"][-1]
696 Xn_predicted = M( Xn )
697 if selfA._toStore("ForecastState"):
698 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
699 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
# --- > By principle, M = Id, Q = 0
702 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
704 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
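# Added note: multi3dvar thus chains one 3DVAR analysis per observation step: the previous
# analysis is propagated by the evolution model M when estimating the state (or kept as is
# when estimating parameters), and the selected variant "oneCycle" (typically one of the
# 3DVAR implementations defined below: std3dvar, van3dvar, incr3dvar or psas3dvar) is then
# run with this background and the observation Ynpu of the step.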
708 # ==============================================================================
709 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
718 Hm = HO["Direct"].appliedTo
719 Ha = HO["Adjoint"].appliedInXTo
# Possible use of a precomputed H(Xb) vector
722 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
723 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
726 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
727 if Y.size != HXb.size:
728 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
729 if max(Y.shape) != max(HXb.shape):
730 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
732 if selfA._toStore("JacobianMatrixAtBackground"):
733 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
734 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
735 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
# Precomputation of the inverses of B and R
# Starting point of the optimization
742 Xini = selfA._parameters["InitializationPoint"]
# Definition of the cost function
# -------------------------------
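# Added note: the cost function evaluated below is the classical 3DVAR functional
#   J(x) = 0.5*(x - Xb)^T B^{-1} (x - Xb) + 0.5*(Y - H(x))^T R^{-1} (Y - H(x))
# and GradientOfCostFunction returns its gradient
#   grad J(x) = B^{-1} (x - Xb) - H'(x)^T R^{-1} (Y - H(x))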
747 _X = numpy.asmatrix(numpy.ravel( x )).T
748 if selfA._parameters["StoreInternalVariables"] or \
749 selfA._toStore("CurrentState") or \
750 selfA._toStore("CurrentOptimum"):
751 selfA.StoredVariables["CurrentState"].store( _X )
753 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
754 _Innovation = Y - _HX
755 if selfA._toStore("SimulatedObservationAtCurrentState") or \
756 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
757 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
758 if selfA._toStore("InnovationAtCurrentState"):
759 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
761 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
762 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
765 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
766 selfA.StoredVariables["CostFunctionJb"].store( Jb )
767 selfA.StoredVariables["CostFunctionJo"].store( Jo )
768 selfA.StoredVariables["CostFunctionJ" ].store( J )
769 if selfA._toStore("IndexOfOptimum") or \
770 selfA._toStore("CurrentOptimum") or \
771 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
772 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
773 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
774 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
775 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
776 if selfA._toStore("IndexOfOptimum"):
777 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
778 if selfA._toStore("CurrentOptimum"):
779 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
780 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
781 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
782 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
783 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
784 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
785 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
786 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
787 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
790 def GradientOfCostFunction(x):
791 _X = numpy.asmatrix(numpy.ravel( x )).T
793 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
794 GradJb = BI * (_X - Xb)
795 GradJo = - Ha( (_X, RI * (Y - _HX)) )
796 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
# Minimization of the functional
# ------------------------------
801 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
803 if selfA._parameters["Minimizer"] == "LBFGSB":
804 if "0.19" <= scipy.version.version <= "1.1.0":
805 import lbfgsbhlt as optimiseur
807 import scipy.optimize as optimiseur
808 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
811 fprime = GradientOfCostFunction,
813 bounds = selfA._parameters["Bounds"],
814 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
815 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
816 pgtol = selfA._parameters["ProjectedGradientTolerance"],
817 iprint = selfA._parameters["optiprint"],
819 nfeval = Informations['funcalls']
820 rc = Informations['warnflag']
821 elif selfA._parameters["Minimizer"] == "TNC":
822 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
825 fprime = GradientOfCostFunction,
827 bounds = selfA._parameters["Bounds"],
828 maxfun = selfA._parameters["MaximumNumberOfSteps"],
829 pgtol = selfA._parameters["ProjectedGradientTolerance"],
830 ftol = selfA._parameters["CostDecrementTolerance"],
831 messages = selfA._parameters["optmessages"],
833 elif selfA._parameters["Minimizer"] == "CG":
834 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
837 fprime = GradientOfCostFunction,
839 maxiter = selfA._parameters["MaximumNumberOfSteps"],
840 gtol = selfA._parameters["GradientNormTolerance"],
841 disp = selfA._parameters["optdisp"],
844 elif selfA._parameters["Minimizer"] == "NCG":
845 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
848 fprime = GradientOfCostFunction,
850 maxiter = selfA._parameters["MaximumNumberOfSteps"],
851 avextol = selfA._parameters["CostDecrementTolerance"],
852 disp = selfA._parameters["optdisp"],
855 elif selfA._parameters["Minimizer"] == "BFGS":
856 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
859 fprime = GradientOfCostFunction,
861 maxiter = selfA._parameters["MaximumNumberOfSteps"],
862 gtol = selfA._parameters["GradientNormTolerance"],
863 disp = selfA._parameters["optdisp"],
867 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
869 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
870 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
# Workaround for a TNC bug on the returned Minimum
# ------------------------------------------------
874 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
875 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
# Computation of the analysis
# ---------------------------
879 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
881 selfA.StoredVariables["Analysis"].store( Xa )
883 if selfA._toStore("OMA") or \
884 selfA._toStore("SigmaObs2") or \
885 selfA._toStore("SimulationQuantiles") or \
886 selfA._toStore("SimulatedObservationAtOptimum"):
887 if selfA._toStore("SimulatedObservationAtCurrentState"):
888 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
889 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
890 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
# Computation of the analysis covariance
# --------------------------------------
896 if selfA._toStore("APosterioriCovariance") or \
897 selfA._toStore("SimulationQuantiles") or \
898 selfA._toStore("JacobianMatrixAtOptimum") or \
899 selfA._toStore("KalmanGainAtOptimum"):
900 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
901 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
902 if selfA._toStore("APosterioriCovariance") or \
903 selfA._toStore("SimulationQuantiles") or \
904 selfA._toStore("KalmanGainAtOptimum"):
905 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
906 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
907 if selfA._toStore("APosterioriCovariance") or \
908 selfA._toStore("SimulationQuantiles"):
912 _ee = numpy.matrix(numpy.zeros(nb)).T
914 _HtEE = numpy.dot(HtM,_ee)
915 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
916 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
917 HessienneI = numpy.matrix( HessienneI )
919 if min(A.shape) != max(A.shape):
920 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
921 if (numpy.diag(A) < 0).any():
922 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
925 L = numpy.linalg.cholesky( A )
927 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
928 if selfA._toStore("APosterioriCovariance"):
929 selfA.StoredVariables["APosterioriCovariance"].store( A )
930 if selfA._toStore("JacobianMatrixAtOptimum"):
931 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
932 if selfA._toStore("KalmanGainAtOptimum"):
933 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
934 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
935 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
# Additional computations and/or storage
# --------------------------------------
939 if selfA._toStore("Innovation") or \
940 selfA._toStore("SigmaObs2") or \
941 selfA._toStore("MahalanobisConsistency") or \
942 selfA._toStore("OMB"):
944 if selfA._toStore("Innovation"):
945 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
946 if selfA._toStore("BMA"):
947 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
948 if selfA._toStore("OMA"):
949 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
950 if selfA._toStore("OMB"):
951 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
952 if selfA._toStore("SigmaObs2"):
953 TraceR = R.trace(Y.size)
954 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
955 if selfA._toStore("MahalanobisConsistency"):
956 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
957 if selfA._toStore("SimulationQuantiles"):
958 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
959 HXa = numpy.matrix(numpy.ravel( HXa )).T
961 for i in range(nech):
962 if selfA._parameters["SimulationForQuantiles"] == "Linear":
963 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
964 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
966 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
967 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
968 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
972 YfQ = numpy.hstack((YfQ,Yr))
975 for quantile in selfA._parameters["Quantiles"]:
976 if not (0. <= float(quantile) <= 1.): continue
977 indice = int(nech * float(quantile) - 1./nech)
978 if YQ is None: YQ = YfQ[:,indice]
979 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
980 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
981 if selfA._toStore("SimulatedObservationAtBackground"):
982 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
983 if selfA._toStore("SimulatedObservationAtOptimum"):
984 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
988 # ==============================================================================
989 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
991 3DVAR variational analysis with no inversion of B
998 Hm = HO["Direct"].appliedTo
999 Ha = HO["Adjoint"].appliedInXTo
# Precomputation of the inverses of B and R
# Starting point of the optimization
1006 Xini = numpy.zeros(Xb.shape)
# Definition of the cost function
# -------------------------------
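# Added note: van3dvar works on the preconditioned control variable v, with x = Xb + B v
# (see the reconstruction of Minimum below), so that the background term needs no inverse
# of B:
#   J(v) = 0.5 * v^T B v + 0.5 * (Y - H(Xb + B v))^T R^{-1} (Y - H(Xb + B v))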
1010 def CostFunction(v):
1011 _V = numpy.asmatrix(numpy.ravel( v )).T
1013 if selfA._parameters["StoreInternalVariables"] or \
1014 selfA._toStore("CurrentState") or \
1015 selfA._toStore("CurrentOptimum"):
1016 selfA.StoredVariables["CurrentState"].store( _X )
1018 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1019 _Innovation = Y - _HX
1020 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1021 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1022 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1023 if selfA._toStore("InnovationAtCurrentState"):
1024 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1026 Jb = float( 0.5 * _V.T * BT * _V )
1027 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1030 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1031 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1032 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1033 selfA.StoredVariables["CostFunctionJ" ].store( J )
1034 if selfA._toStore("IndexOfOptimum") or \
1035 selfA._toStore("CurrentOptimum") or \
1036 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1037 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1038 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1039 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1040 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1041 if selfA._toStore("IndexOfOptimum"):
1042 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1043 if selfA._toStore("CurrentOptimum"):
1044 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1045 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1046 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1047 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1048 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1049 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1050 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1051 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1052 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1055 def GradientOfCostFunction(v):
1056 _V = numpy.asmatrix(numpy.ravel( v )).T
1059 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1061 GradJo = - Ha( (_X, RI * (Y - _HX)) )
1062 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
# Minimization of the functional
# ------------------------------
1067 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1069 if selfA._parameters["Minimizer"] == "LBFGSB":
1070 if "0.19" <= scipy.version.version <= "1.1.0":
1071 import lbfgsbhlt as optimiseur
1073 import scipy.optimize as optimiseur
1074 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1075 func = CostFunction,
1077 fprime = GradientOfCostFunction,
1079 bounds = selfA._parameters["Bounds"],
1080 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1081 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1082 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1083 iprint = selfA._parameters["optiprint"],
1085 nfeval = Informations['funcalls']
1086 rc = Informations['warnflag']
1087 elif selfA._parameters["Minimizer"] == "TNC":
1088 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1089 func = CostFunction,
1091 fprime = GradientOfCostFunction,
1093 bounds = selfA._parameters["Bounds"],
1094 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1095 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1096 ftol = selfA._parameters["CostDecrementTolerance"],
1097 messages = selfA._parameters["optmessages"],
1099 elif selfA._parameters["Minimizer"] == "CG":
1100 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1103 fprime = GradientOfCostFunction,
1105 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1106 gtol = selfA._parameters["GradientNormTolerance"],
1107 disp = selfA._parameters["optdisp"],
1110 elif selfA._parameters["Minimizer"] == "NCG":
1111 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1114 fprime = GradientOfCostFunction,
1116 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1117 avextol = selfA._parameters["CostDecrementTolerance"],
1118 disp = selfA._parameters["optdisp"],
1121 elif selfA._parameters["Minimizer"] == "BFGS":
1122 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1125 fprime = GradientOfCostFunction,
1127 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1128 gtol = selfA._parameters["GradientNormTolerance"],
1129 disp = selfA._parameters["optdisp"],
1133 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1135 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1136 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
# Workaround for a TNC bug on the returned Minimum
# ------------------------------------------------
1140 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1141 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1142 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1144 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
# Computation of the analysis
# ---------------------------
1150 selfA.StoredVariables["Analysis"].store( Xa )
1152 if selfA._toStore("OMA") or \
1153 selfA._toStore("SigmaObs2") or \
1154 selfA._toStore("SimulationQuantiles") or \
1155 selfA._toStore("SimulatedObservationAtOptimum"):
1156 if selfA._toStore("SimulatedObservationAtCurrentState"):
1157 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1158 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1159 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
# Computation of the analysis covariance
# --------------------------------------
1165 if selfA._toStore("APosterioriCovariance") or \
1166 selfA._toStore("SimulationQuantiles") or \
1167 selfA._toStore("JacobianMatrixAtOptimum") or \
1168 selfA._toStore("KalmanGainAtOptimum"):
1169 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1170 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1171 if selfA._toStore("APosterioriCovariance") or \
1172 selfA._toStore("SimulationQuantiles") or \
1173 selfA._toStore("KalmanGainAtOptimum"):
1174 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1175 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1176 if selfA._toStore("APosterioriCovariance") or \
1177 selfA._toStore("SimulationQuantiles"):
1182 _ee = numpy.matrix(numpy.zeros(nb)).T
1184 _HtEE = numpy.dot(HtM,_ee)
1185 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1186 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1187 HessienneI = numpy.matrix( HessienneI )
1189 if min(A.shape) != max(A.shape):
1190 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1191 if (numpy.diag(A) < 0).any():
1192 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
1195 L = numpy.linalg.cholesky( A )
1197 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1198 if selfA._toStore("APosterioriCovariance"):
1199 selfA.StoredVariables["APosterioriCovariance"].store( A )
1200 if selfA._toStore("JacobianMatrixAtOptimum"):
1201 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1202 if selfA._toStore("KalmanGainAtOptimum"):
1203 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1204 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1205 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
# Additional computations and/or storage
# --------------------------------------
1209 if selfA._toStore("Innovation") or \
1210 selfA._toStore("SigmaObs2") or \
1211 selfA._toStore("MahalanobisConsistency") or \
1212 selfA._toStore("OMB"):
1214 if selfA._toStore("Innovation"):
1215 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1216 if selfA._toStore("BMA"):
1217 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1218 if selfA._toStore("OMA"):
1219 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1220 if selfA._toStore("OMB"):
1221 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1222 if selfA._toStore("SigmaObs2"):
1223 TraceR = R.trace(Y.size)
1224 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1225 if selfA._toStore("MahalanobisConsistency"):
1226 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1227 if selfA._toStore("SimulationQuantiles"):
1228 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1229 HXa = numpy.matrix(numpy.ravel( HXa )).T
1231 for i in range(nech):
1232 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1233 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1234 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1236 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1237 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1238 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1242 YfQ = numpy.hstack((YfQ,Yr))
1245 for quantile in selfA._parameters["Quantiles"]:
1246 if not (0. <= float(quantile) <= 1.): continue
1247 indice = int(nech * float(quantile) - 1./nech)
1248 if YQ is None: YQ = YfQ[:,indice]
1249 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1250 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1251 if selfA._toStore("SimulatedObservationAtBackground"):
1252 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1253 if selfA._toStore("SimulatedObservationAtOptimum"):
1254 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1258 # ==============================================================================
1259 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
# Non-linear operator for the outer loop
1268 Hm = HO["Direct"].appliedTo
# Precomputation of the inverses of B and R
# Starting point of the optimization
1275 Xini = selfA._parameters["InitializationPoint"]
1277 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1278 Innovation = Y - HXb
1285 Xr = Xini.reshape((-1,1))
1286 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1290 Ht = HO["Tangent"].asMatrix(Xr)
1291 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
# Definition of the cost function
# -------------------------------
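# Added note: the inner loop of incremental 3DVAR minimizes, in the increment dx around Xb,
# the quadratic cost obtained by linearizing H around the current outer iterate Xr (tangent
# matrix Ht):
#   J(dx) = 0.5 * dx^T B^{-1} dx + 0.5 * (d - Ht dx)^T R^{-1} (d - Ht dx),   d = Y - H(Xb)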
1295 def CostFunction(dx):
1296 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1297 if selfA._parameters["StoreInternalVariables"] or \
1298 selfA._toStore("CurrentState") or \
1299 selfA._toStore("CurrentOptimum"):
1300 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1302 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1303 _dInnovation = Innovation - _HdX
1304 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1305 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1306 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1307 if selfA._toStore("InnovationAtCurrentState"):
1308 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1310 Jb = float( 0.5 * _dX.T * BI * _dX )
1311 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1314 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1315 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1316 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1317 selfA.StoredVariables["CostFunctionJ" ].store( J )
1318 if selfA._toStore("IndexOfOptimum") or \
1319 selfA._toStore("CurrentOptimum") or \
1320 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1321 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1322 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1323 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1324 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1325 if selfA._toStore("IndexOfOptimum"):
1326 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1327 if selfA._toStore("CurrentOptimum"):
1328 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1329 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1330 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1331 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1332 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1333 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1334 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1335 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1336 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1339 def GradientOfCostFunction(dx):
1340 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1342 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1343 _dInnovation = Innovation - _HdX
1345 GradJo = - Ht.T @ (RI * _dInnovation)
1346 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
# Minimization of the functional
# ------------------------------
1351 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1353 if selfA._parameters["Minimizer"] == "LBFGSB":
1354 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1355 if "0.19" <= scipy.version.version <= "1.1.0":
1356 import lbfgsbhlt as optimiseur
1358 import scipy.optimize as optimiseur
1359 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1360 func = CostFunction,
1361 x0 = numpy.zeros(Xini.size),
1362 fprime = GradientOfCostFunction,
1364 bounds = selfA._parameters["Bounds"],
1365 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1366 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1367 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1368 iprint = selfA._parameters["optiprint"],
1370 nfeval = Informations['funcalls']
1371 rc = Informations['warnflag']
1372 elif selfA._parameters["Minimizer"] == "TNC":
1373 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1374 func = CostFunction,
1375 x0 = numpy.zeros(Xini.size),
1376 fprime = GradientOfCostFunction,
1378 bounds = selfA._parameters["Bounds"],
1379 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1380 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1381 ftol = selfA._parameters["CostDecrementTolerance"],
1382 messages = selfA._parameters["optmessages"],
1384 elif selfA._parameters["Minimizer"] == "CG":
1385 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1387 x0 = numpy.zeros(Xini.size),
1388 fprime = GradientOfCostFunction,
1390 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1391 gtol = selfA._parameters["GradientNormTolerance"],
1392 disp = selfA._parameters["optdisp"],
1395 elif selfA._parameters["Minimizer"] == "NCG":
1396 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1398 x0 = numpy.zeros(Xini.size),
1399 fprime = GradientOfCostFunction,
1401 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1402 avextol = selfA._parameters["CostDecrementTolerance"],
1403 disp = selfA._parameters["optdisp"],
1406 elif selfA._parameters["Minimizer"] == "BFGS":
1407 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1409 x0 = numpy.zeros(Xini.size),
1410 fprime = GradientOfCostFunction,
1412 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1413 gtol = selfA._parameters["GradientNormTolerance"],
1414 disp = selfA._parameters["optdisp"],
1418 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1420 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1421 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1423 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1424 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1425 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1427 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1430 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1431 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
# Computation of the analysis
# ---------------------------
1437 selfA.StoredVariables["Analysis"].store( Xa )
1439 if selfA._toStore("OMA") or \
1440 selfA._toStore("SigmaObs2") or \
1441 selfA._toStore("SimulationQuantiles") or \
1442 selfA._toStore("SimulatedObservationAtOptimum"):
1443 if selfA._toStore("SimulatedObservationAtCurrentState"):
1444 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1445 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1446 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
# Computation of the analysis covariance
# --------------------------------------
1452 if selfA._toStore("APosterioriCovariance") or \
1453 selfA._toStore("SimulationQuantiles") or \
1454 selfA._toStore("JacobianMatrixAtOptimum") or \
1455 selfA._toStore("KalmanGainAtOptimum"):
1456 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1457 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1458 if selfA._toStore("APosterioriCovariance") or \
1459 selfA._toStore("SimulationQuantiles") or \
1460 selfA._toStore("KalmanGainAtOptimum"):
1461 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1462 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1463 if selfA._toStore("APosterioriCovariance") or \
1464 selfA._toStore("SimulationQuantiles"):
1468 _ee = numpy.matrix(numpy.zeros(nb)).T
1470 _HtEE = numpy.dot(HtM,_ee)
1471 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1472 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1473 HessienneI = numpy.matrix( HessienneI )
1475 if min(A.shape) != max(A.shape):
1476 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1477 if (numpy.diag(A) < 0).any():
1478 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
if logging.getLogger().level < logging.WARNING: # This check is only performed in debug mode
1481 L = numpy.linalg.cholesky( A )
1483 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1484 if selfA._toStore("APosterioriCovariance"):
1485 selfA.StoredVariables["APosterioriCovariance"].store( A )
1486 if selfA._toStore("JacobianMatrixAtOptimum"):
1487 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1488 if selfA._toStore("KalmanGainAtOptimum"):
1489 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1490 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1491 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1493 # Additional computations and/or storage
1494 # ---------------------------------------
1495 if selfA._toStore("Innovation") or \
1496 selfA._toStore("SigmaObs2") or \
1497 selfA._toStore("MahalanobisConsistency") or \
1498 selfA._toStore("OMB"):
1500 if selfA._toStore("Innovation"):
1501 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1502 if selfA._toStore("BMA"):
1503 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1504 if selfA._toStore("OMA"):
1505 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1506 if selfA._toStore("OMB"):
1507 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1508 if selfA._toStore("SigmaObs2"):
1509 TraceR = R.trace(Y.size)
1510 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1511 if selfA._toStore("MahalanobisConsistency"):
1512 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1513 if selfA._toStore("SimulationQuantiles"):
1514 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1515 HXa = numpy.matrix(numpy.ravel( HXa )).T
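# Quantiles are estimated by Monte-Carlo sampling: nech states are drawn from
# N(Xa, A) and propagated either through the tangent linear operator HtM
# ("Linear") or through the full operator Hm ("NonLinear"); the requested
# empirical quantiles are then extracted by index from the resulting sample
# of simulated observations YfQ.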
1517 for i in range(nech):
1518 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1519 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1520 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1522 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1523 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1524 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1528 YfQ = numpy.hstack((YfQ,Yr))
1531 for quantile in selfA._parameters["Quantiles"]:
1532 if not (0. <= float(quantile) <= 1.): continue
1533 indice = int(nech * float(quantile) - 1./nech)
1534 if YQ is None: YQ = YfQ[:,indice]
1535 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1536 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1537 if selfA._toStore("SimulatedObservationAtBackground"):
1538 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1539 if selfA._toStore("SimulatedObservationAtOptimum"):
1540 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1544 # ==============================================================================
1545 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1554 Hm = HO["Direct"].appliedTo
1556 # Possible use of a precomputed H(Xb) vector
1557 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1558 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1561 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1562 if Y.size != HXb.size:
1563 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1564 if max(Y.shape) != max(HXb.shape):
1565 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1567 if selfA._toStore("JacobianMatrixAtBackground"):
1568 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1569 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1570 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1572 Ht = HO["Tangent"].asMatrix(Xb)
1574 HBHTpR = R + Ht * BHT
1575 Innovation = Y - HXb
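# PSAS formulation: the minimization is performed in observation (dual) space
# on a weight vector w, with the quadratic cost function
#   J(w) = 0.5 * w^T (H B H^T + R) w - w^T d,    d = Y - H(Xb),
# the state-space analysis being recovered afterwards as Xa = Xb + B H^T w.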
1577 # Starting point of the optimization
1578 Xini = numpy.zeros(Xb.shape)
1580 # Definition of the cost function
1581 # ------------------------------
1582 def CostFunction(w):
1583 _W = numpy.asmatrix(numpy.ravel( w )).T
1584 if selfA._parameters["StoreInternalVariables"] or \
1585 selfA._toStore("CurrentState") or \
1586 selfA._toStore("CurrentOptimum"):
1587 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1588 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1589 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1590 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1591 if selfA._toStore("InnovationAtCurrentState"):
1592 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1594 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1595 Jo = float( - _W.T * Innovation )
1598 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1599 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1600 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1601 selfA.StoredVariables["CostFunctionJ" ].store( J )
1602 if selfA._toStore("IndexOfOptimum") or \
1603 selfA._toStore("CurrentOptimum") or \
1604 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1605 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1606 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1607 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1608 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1609 if selfA._toStore("IndexOfOptimum"):
1610 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1611 if selfA._toStore("CurrentOptimum"):
1612 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1613 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1614 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1615 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1616 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1617 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1618 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1619 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1620 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1623 def GradientOfCostFunction(w):
1624 _W = numpy.asmatrix(numpy.ravel( w )).T
1625 GradJb = HBHTpR * _W
1626 GradJo = - Innovation
1627 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1630 # Minimization of the functional
1631 # --------------------------------
1632 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1634 if selfA._parameters["Minimizer"] == "LBFGSB":
1635 if "0.19" <= scipy.version.version <= "1.1.0":
1636 import lbfgsbhlt as optimiseur
1638 import scipy.optimize as optimiseur
1639 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1640 func = CostFunction,
1642 fprime = GradientOfCostFunction,
1644 bounds = selfA._parameters["Bounds"],
1645 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1646 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1647 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1648 iprint = selfA._parameters["optiprint"],
1650 nfeval = Informations['funcalls']
1651 rc = Informations['warnflag']
1652 elif selfA._parameters["Minimizer"] == "TNC":
1653 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1654 func = CostFunction,
1656 fprime = GradientOfCostFunction,
1658 bounds = selfA._parameters["Bounds"],
1659 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1660 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1661 ftol = selfA._parameters["CostDecrementTolerance"],
1662 messages = selfA._parameters["optmessages"],
1664 elif selfA._parameters["Minimizer"] == "CG":
1665 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1668 fprime = GradientOfCostFunction,
1670 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1671 gtol = selfA._parameters["GradientNormTolerance"],
1672 disp = selfA._parameters["optdisp"],
1675 elif selfA._parameters["Minimizer"] == "NCG":
1676 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1679 fprime = GradientOfCostFunction,
1681 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1682 avextol = selfA._parameters["CostDecrementTolerance"],
1683 disp = selfA._parameters["optdisp"],
1686 elif selfA._parameters["Minimizer"] == "BFGS":
1687 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1690 fprime = GradientOfCostFunction,
1692 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1693 gtol = selfA._parameters["GradientNormTolerance"],
1694 disp = selfA._parameters["optdisp"],
1698 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1700 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1701 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1703 # Correction to work around a TNC bug in the returned Minimum
1704 # ----------------------------------------------------------------
1705 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1706 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1707 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1709 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1711 # Obtaining the analysis
1712 # ----------------------
1715 selfA.StoredVariables["Analysis"].store( Xa )
1717 if selfA._toStore("OMA") or \
1718 selfA._toStore("SigmaObs2") or \
1719 selfA._toStore("SimulationQuantiles") or \
1720 selfA._toStore("SimulatedObservationAtOptimum"):
1721 if selfA._toStore("SimulatedObservationAtCurrentState"):
1722 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1723 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1724 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1728 # Computation of the analysis covariance
1729 # ---------------------------------
1730 if selfA._toStore("APosterioriCovariance") or \
1731 selfA._toStore("SimulationQuantiles") or \
1732 selfA._toStore("JacobianMatrixAtOptimum") or \
1733 selfA._toStore("KalmanGainAtOptimum"):
1734 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1735 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1736 if selfA._toStore("APosterioriCovariance") or \
1737 selfA._toStore("SimulationQuantiles") or \
1738 selfA._toStore("KalmanGainAtOptimum"):
1739 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1740 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1741 if selfA._toStore("APosterioriCovariance") or \
1742 selfA._toStore("SimulationQuantiles"):
1748 _ee = numpy.matrix(numpy.zeros(nb)).T
1750 _HtEE = numpy.dot(HtM,_ee)
1751 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1752 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1753 HessienneI = numpy.matrix( HessienneI )
1755 if min(A.shape) != max(A.shape):
1756 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1757 if (numpy.diag(A) < 0).any():
1758 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1759 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1761 L = numpy.linalg.cholesky( A )
1763 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1764 if selfA._toStore("APosterioriCovariance"):
1765 selfA.StoredVariables["APosterioriCovariance"].store( A )
1766 if selfA._toStore("JacobianMatrixAtOptimum"):
1767 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1768 if selfA._toStore("KalmanGainAtOptimum"):
1769 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1770 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1771 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1773 # Additional computations and/or storage
1774 # ---------------------------------------
1775 if selfA._toStore("Innovation") or \
1776 selfA._toStore("SigmaObs2") or \
1777 selfA._toStore("MahalanobisConsistency") or \
1778 selfA._toStore("OMB"):
1780 if selfA._toStore("Innovation"):
1781 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1782 if selfA._toStore("BMA"):
1783 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1784 if selfA._toStore("OMA"):
1785 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1786 if selfA._toStore("OMB"):
1787 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1788 if selfA._toStore("SigmaObs2"):
1789 TraceR = R.trace(Y.size)
1790 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1791 if selfA._toStore("MahalanobisConsistency"):
1792 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1793 if selfA._toStore("SimulationQuantiles"):
1794 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1795 HXa = numpy.matrix(numpy.ravel( HXa )).T
1797 for i in range(nech):
1798 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1799 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1800 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1802 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1803 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1804 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1808 YfQ = numpy.hstack((YfQ,Yr))
1811 for quantile in selfA._parameters["Quantiles"]:
1812 if not (0. <= float(quantile) <= 1.): continue
1813 indice = int(nech * float(quantile) - 1./nech)
1814 if YQ is None: YQ = YfQ[:,indice]
1815 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1816 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1817 if selfA._toStore("SimulatedObservationAtBackground"):
1818 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1819 if selfA._toStore("SimulatedObservationAtOptimum"):
1820 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1824 # ==============================================================================
1825 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1834 Hm = HO["Direct"].appliedControledFormTo
1835 Mm = EM["Direct"].appliedControledFormTo
1837 if CM is not None and "Tangent" in CM and U is not None:
1838 Cm = CM["Tangent"].asMatrix(Xb)
1844 if hasattr(U,"store") and 1<=_step<len(U) :
1845 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1846 elif hasattr(U,"store") and len(U)==1:
1847 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1849 _Un = numpy.asmatrix(numpy.ravel( U )).T
1854 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
1855 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1861 # Remark: the observations are used starting from time step number 1,
1862 # and are stored in Yo as rows indexed by these time steps.
1863 # Step 0 is therefore not used, since the first step starts
1864 # with the observation of step 1.
1866 # Number of steps identical to the number of observation steps
1867 if hasattr(Y,"stepnumber"):
1868 duration = Y.stepnumber()
1872 # Precomputation of the inverses of B and R
1876 # Starting point of the optimization
1877 Xini = selfA._parameters["InitializationPoint"]
1879 # Definition of the cost function
1880 # ------------------------------
1881 selfA.DirectCalculation = [None,] # Step 0 is not observed
1882 selfA.DirectInnovation = [None,] # Step 0 is not observed
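# Strong-constraint 4D-Var cost function: starting from the initial state x,
# the model is integrated forward over the whole assimilation window and
#   J(x) = 0.5 (x - Xb)^T B^-1 (x - Xb)
#        + 0.5 sum_k (y_k - H(M_k(x)))^T R^-1 (y_k - H(M_k(x)))
# is accumulated; the forward states and innovations are kept in
# selfA.DirectCalculation and selfA.DirectInnovation for the adjoint sweep.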
1883 def CostFunction(x):
1884 _X = numpy.asmatrix(numpy.ravel( x )).T
1885 if selfA._parameters["StoreInternalVariables"] or \
1886 selfA._toStore("CurrentState") or \
1887 selfA._toStore("CurrentOptimum"):
1888 selfA.StoredVariables["CurrentState"].store( _X )
1889 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1890 selfA.DirectCalculation = [None,]
1891 selfA.DirectInnovation = [None,]
1894 for step in range(0,duration-1):
1895 if hasattr(Y,"store"):
1896 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1898 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1902 if selfA._parameters["EstimationOf"] == "State":
1903 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1904 elif selfA._parameters["EstimationOf"] == "Parameters":
1907 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1908 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1909 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1911 # Step computing the difference with the observations
1912 if selfA._parameters["EstimationOf"] == "State":
1913 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1914 elif selfA._parameters["EstimationOf"] == "Parameters":
1915 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1917 # State storage
1918 selfA.DirectCalculation.append( _Xn )
1919 selfA.DirectInnovation.append( _YmHMX )
1921 # Addition to the observation term of the functional
1922 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
1925 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1926 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1927 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1928 selfA.StoredVariables["CostFunctionJ" ].store( J )
1929 if selfA._toStore("IndexOfOptimum") or \
1930 selfA._toStore("CurrentOptimum") or \
1931 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1932 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1933 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1934 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1935 if selfA._toStore("IndexOfOptimum"):
1936 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1937 if selfA._toStore("CurrentOptimum"):
1938 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1939 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1940 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1941 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1942 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1943 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1944 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1947 def GradientOfCostFunction(x):
1948 _X = numpy.asmatrix(numpy.ravel( x )).T
1949 GradJb = BI * (_X - Xb)
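# Backward (adjoint) sweep: the stored innovations are weighted by R^-1,
# mapped back to state space by the observation adjoint Ha, and propagated
# backwards in time by the model adjoint Ma, giving
#   grad J = B^-1 (x - Xb) - sum_k M_k^T H^T R^-1 (y_k - H(M_k(x))).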
1951 for step in range(duration-1,0,-1):
1952 # Retrieve the most recently stored evolution state
1953 _Xn = selfA.DirectCalculation.pop()
1954 # Retrieve the most recently stored innovation
1955 _YmHMX = selfA.DirectInnovation.pop()
1956 # Computation of the adjoints
1957 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1958 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1959 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1960 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1961 # Gradient computation through the adjoint state
1962 GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1963 GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1964 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
1967 # Minimization of the functional
1968 # --------------------------------
1969 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1971 if selfA._parameters["Minimizer"] == "LBFGSB":
1972 if "0.19" <= scipy.version.version <= "1.1.0":
1973 import lbfgsbhlt as optimiseur
1975 import scipy.optimize as optimiseur
1976 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1977 func = CostFunction,
1979 fprime = GradientOfCostFunction,
1981 bounds = selfA._parameters["Bounds"],
1982 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1983 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1984 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1985 iprint = selfA._parameters["optiprint"],
1987 nfeval = Informations['funcalls']
1988 rc = Informations['warnflag']
1989 elif selfA._parameters["Minimizer"] == "TNC":
1990 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1991 func = CostFunction,
1993 fprime = GradientOfCostFunction,
1995 bounds = selfA._parameters["Bounds"],
1996 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1997 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1998 ftol = selfA._parameters["CostDecrementTolerance"],
1999 messages = selfA._parameters["optmessages"],
2001 elif selfA._parameters["Minimizer"] == "CG":
2002 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2005 fprime = GradientOfCostFunction,
2007 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2008 gtol = selfA._parameters["GradientNormTolerance"],
2009 disp = selfA._parameters["optdisp"],
2012 elif selfA._parameters["Minimizer"] == "NCG":
2013 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2016 fprime = GradientOfCostFunction,
2018 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2019 avextol = selfA._parameters["CostDecrementTolerance"],
2020 disp = selfA._parameters["optdisp"],
2023 elif selfA._parameters["Minimizer"] == "BFGS":
2024 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2027 fprime = GradientOfCostFunction,
2029 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2030 gtol = selfA._parameters["GradientNormTolerance"],
2031 disp = selfA._parameters["optdisp"],
2035 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2037 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2038 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2040 # Correction to work around a TNC bug in the returned Minimum
2041 # ----------------------------------------------------------------
2042 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2043 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2045 # Obtaining the analysis
2046 # ----------------------
2047 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2049 selfA.StoredVariables["Analysis"].store( Xa )
2051 # Additional computations and/or storage
2052 # ---------------------------------------
2053 if selfA._toStore("BMA"):
2054 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2058 # ==============================================================================
2059 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2063 if selfA._parameters["EstimationOf"] == "Parameters":
2064 selfA._parameters["StoreInternalVariables"] = True
2068 H = HO["Direct"].appliedControledFormTo
2070 if selfA._parameters["EstimationOf"] == "State":
2071 M = EM["Direct"].appliedControledFormTo
2073 if CM is not None and "Tangent" in CM and U is not None:
2074 Cm = CM["Tangent"].asMatrix(Xb)
2078 # Number of steps identical to the number of observation steps
2079 # -------------------------------------------------------
2080 if hasattr(Y,"stepnumber"):
2081 duration = Y.stepnumber()
2082 __p = numpy.cumprod(Y.shape())[-1]
2085 __p = numpy.array(Y).size
2087 # Precomputation of the inverses of B and R
2088 # ----------------------------------
2089 if selfA._parameters["StoreInternalVariables"] \
2090 or selfA._toStore("CostFunctionJ") \
2091 or selfA._toStore("CostFunctionJb") \
2092 or selfA._toStore("CostFunctionJo") \
2093 or selfA._toStore("CurrentOptimum") \
2094 or selfA._toStore("APosterioriCovariance"):
2101 __m = selfA._parameters["NumberOfMembers"]
2102 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2104 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2106 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2108 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2110 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2111 selfA.StoredVariables["Analysis"].store( Xb )
2112 if selfA._toStore("APosterioriCovariance"):
2113 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2116 previousJMinimum = numpy.finfo(float).max
2118 for step in range(duration-1):
2119 if hasattr(Y,"store"):
2120 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2122 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2125 if hasattr(U,"store") and len(U)>1:
2126 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2127 elif hasattr(U,"store") and len(U)==1:
2128 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2130 Un = numpy.asmatrix(numpy.ravel( U )).T
2134 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2135 Xn = CovarianceInflation( Xn,
2136 selfA._parameters["InflationType"],
2137 selfA._parameters["InflationFactor"],
2140 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2141 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2143 returnSerieAsArrayMatrix = True )
2144 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2145 Xn_predicted = EMX + qi
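# Forecast step of the stochastic EnKF: each member is propagated by the
# evolution model and receives an additive model-error perturbation qi drawn
# from N(0, Q), so that the forecast spread accounts for model uncertainty.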
2146 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2148 returnSerieAsArrayMatrix = True )
2149 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2150 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2151 Xn_predicted = Xn_predicted + Cm * Un
2152 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2153 # --- > By principle, M = Id, Q = 0
2155 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2157 returnSerieAsArrayMatrix = True )
2159 # Mean of forecast and observation of forecast
2160 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2161 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2163 #--------------------------
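# Stochastic (perturbed-observations) EnKF analysis: the gain
#   K = Pf H^T (H Pf H^T + R)^-1
# is estimated from the ensemble anomalies, and each member is updated with
# an independently perturbed observation, i.e. for member i:
#   Xn[:,i] = Xn_predicted[:,i] + K @ (y + ri - HX_predicted[:,i]),  ri ~ N(0, R)
# which keeps the expected analysis covariance consistent with the Kalman one.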
2164 if VariantM == "KalmanFilterFormula05":
2165 PfHT, HPfHT = 0., 0.
2166 for i in range(__m):
2167 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2168 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2169 PfHT += Exfi * Eyfi.T
2170 HPfHT += Eyfi * Eyfi.T
2171 PfHT = (1./(__m-1)) * PfHT
2172 HPfHT = (1./(__m-1)) * HPfHT
2173 Kn = PfHT * ( R + HPfHT ).I
2176 for i in range(__m):
2177 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2178 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2179 #--------------------------
2180 elif VariantM == "KalmanFilterFormula16":
2181 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2182 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2184 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2185 EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
2187 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2189 for i in range(__m):
2190 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2191 #--------------------------
2193 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2195 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2196 Xn = CovarianceInflation( Xn,
2197 selfA._parameters["InflationType"],
2198 selfA._parameters["InflationFactor"],
2201 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2202 #--------------------------
2204 if selfA._parameters["StoreInternalVariables"] \
2205 or selfA._toStore("CostFunctionJ") \
2206 or selfA._toStore("CostFunctionJb") \
2207 or selfA._toStore("CostFunctionJo") \
2208 or selfA._toStore("APosterioriCovariance") \
2209 or selfA._toStore("InnovationAtCurrentAnalysis") \
2210 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2211 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2212 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2213 _Innovation = Ynpu - _HXa
2215 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2216 # ---> with analysis
2217 selfA.StoredVariables["Analysis"].store( Xa )
2218 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2219 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2220 if selfA._toStore("InnovationAtCurrentAnalysis"):
2221 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2222 # ---> with current state
2223 if selfA._parameters["StoreInternalVariables"] \
2224 or selfA._toStore("CurrentState"):
2225 selfA.StoredVariables["CurrentState"].store( Xn )
2226 if selfA._toStore("ForecastState"):
2227 selfA.StoredVariables["ForecastState"].store( EMX )
2228 if selfA._toStore("BMA"):
2229 selfA.StoredVariables["BMA"].store( EMX - Xa )
2230 if selfA._toStore("InnovationAtCurrentState"):
2231 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2232 if selfA._toStore("SimulatedObservationAtCurrentState") \
2233 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2234 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2236 if selfA._parameters["StoreInternalVariables"] \
2237 or selfA._toStore("CostFunctionJ") \
2238 or selfA._toStore("CostFunctionJb") \
2239 or selfA._toStore("CostFunctionJo") \
2240 or selfA._toStore("CurrentOptimum") \
2241 or selfA._toStore("APosterioriCovariance"):
2242 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2243 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2245 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2246 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2247 selfA.StoredVariables["CostFunctionJ" ].store( J )
2249 if selfA._toStore("IndexOfOptimum") \
2250 or selfA._toStore("CurrentOptimum") \
2251 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2252 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2253 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2254 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2255 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2256 if selfA._toStore("IndexOfOptimum"):
2257 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2258 if selfA._toStore("CurrentOptimum"):
2259 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2260 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2261 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2262 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2263 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2264 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2265 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2266 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2267 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2268 if selfA._toStore("APosterioriCovariance"):
2269 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
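# The a posteriori covariance is estimated from the analysis anomalies,
# Pn ~ Eai Eai^T, and symmetrized to limit round-off induced asymmetry.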
2271 Pn = 0.5 * (Pn + Pn.T)
2272 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2273 if selfA._parameters["EstimationOf"] == "Parameters" \
2274 and J < previousJMinimum:
2275 previousJMinimum = J
2277 if selfA._toStore("APosterioriCovariance"):
2278 covarianceXaMin = Pn
2280 # Additional final storage of the optimum in parameter estimation
2281 # ----------------------------------------------------------------------
2282 if selfA._parameters["EstimationOf"] == "Parameters":
2283 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2284 selfA.StoredVariables["Analysis"].store( XaMin )
2285 if selfA._toStore("APosterioriCovariance"):
2286 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2287 if selfA._toStore("BMA"):
2288 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2292 # ==============================================================================
2293 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2295 Ensemble-Transform EnKF
2297 if selfA._parameters["EstimationOf"] == "Parameters":
2298 selfA._parameters["StoreInternalVariables"] = True
2302 H = HO["Direct"].appliedControledFormTo
2304 if selfA._parameters["EstimationOf"] == "State":
2305 M = EM["Direct"].appliedControledFormTo
2307 if CM is not None and "Tangent" in CM and U is not None:
2308 Cm = CM["Tangent"].asMatrix(Xb)
2312 # Number of steps identical to the number of observation steps
2313 # -------------------------------------------------------
2314 if hasattr(Y,"stepnumber"):
2315 duration = Y.stepnumber()
2316 __p = numpy.cumprod(Y.shape())[-1]
2319 __p = numpy.array(Y).size
2321 # Precomputation of the inverses of B and R
2322 # ----------------------------------
2323 if selfA._parameters["StoreInternalVariables"] \
2324 or selfA._toStore("CostFunctionJ") \
2325 or selfA._toStore("CostFunctionJb") \
2326 or selfA._toStore("CostFunctionJo") \
2327 or selfA._toStore("CurrentOptimum") \
2328 or selfA._toStore("APosterioriCovariance"):
2331 elif VariantM != "KalmanFilterFormula":
2333 if VariantM == "KalmanFilterFormula":
2334 RIdemi = R.choleskyI()
2339 __m = selfA._parameters["NumberOfMembers"]
2340 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2342 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2344 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2346 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2348 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2349 selfA.StoredVariables["Analysis"].store( Xb )
2350 if selfA._toStore("APosterioriCovariance"):
2351 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2354 previousJMinimum = numpy.finfo(float).max
2356 for step in range(duration-1):
2357 if hasattr(Y,"store"):
2358 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2360 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2363 if hasattr(U,"store") and len(U)>1:
2364 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2365 elif hasattr(U,"store") and len(U)==1:
2366 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2368 Un = numpy.asmatrix(numpy.ravel( U )).T
2372 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2373 Xn = CovarianceInflation( Xn,
2374 selfA._parameters["InflationType"],
2375 selfA._parameters["InflationFactor"],
2378 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2379 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2381 returnSerieAsArrayMatrix = True )
2382 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2383 Xn_predicted = EMX + qi
2384 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2386 returnSerieAsArrayMatrix = True )
2387 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2388 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2389 Xn_predicted = Xn_predicted + Cm * Un
2390 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2391 # --- > By principle, M = Id, Q = 0
2393 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2395 returnSerieAsArrayMatrix = True )
2397 # Mean of forecast and observation of forecast
2398 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2399 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2402 EaX = EnsembleOfAnomalies( Xn_predicted )
2403 EaHX = numpy.array(HX_predicted - Hfm)
2405 #--------------------------
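# Deterministic ETKF analysis in ensemble space: with the normalized
# observation anomalies mS = R^-1/2 EaHX / sqrt(m-1) and the normalized
# innovation delta = R^-1/2 (Ynpu - Hfm), the analysis weights are
#   vw = (I + mS^T mS)^-1 mS^T delta
# and the ensemble is rebuilt around the forecast mean as
#   Xn = Xfm + EaX/sqrt(m-1) @ ( vw + sqrt(m-1) * T^1/2 @ mU ),  T = (I + mS^T mS)^-1.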
2406 if VariantM == "KalmanFilterFormula":
2407 mS = RIdemi * EaHX / numpy.sqrt(__m-1)
2408 delta = RIdemi * ( Ynpu - Hfm )
2409 mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2410 vw = mT @ mS.transpose() @ delta
2412 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2415 EaX = EaX / numpy.sqrt(__m-1)
2416 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2417 #--------------------------
2418 elif VariantM == "Variational":
2419 HXfm = H((Xfm[:,None], Un)) # Could also be Hfm
2420 def CostFunction(w):
2421 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2422 _Jo = 0.5 * _A.T @ (RI * _A)
2423 _Jb = 0.5 * (__m-1) * w.T @ w
2426 def GradientOfCostFunction(w):
2427 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2428 _GradJo = - EaHX.T @ (RI * _A)
2429 _GradJb = (__m-1) * w.reshape((__m,1))
2430 _GradJ = _GradJo + _GradJb
2431 return numpy.ravel(_GradJ)
2432 vw = scipy.optimize.fmin_cg(
2434 x0 = numpy.zeros(__m),
2435 fprime = GradientOfCostFunction,
2440 Hto = EaHX.T @ (RI * EaHX)
2441 Htb = (__m-1) * numpy.eye(__m)
2444 Pta = numpy.linalg.inv( Hta )
2445 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2447 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2448 #--------------------------
2449 elif VariantM == "FiniteSize11": # Gauge Boc2011
2450 HXfm = H((Xfm[:,None], Un)) # Could also be Hfm
2451 def CostFunction(w):
2452 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2453 _Jo = 0.5 * _A.T @ (RI * _A)
2454 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2457 def GradientOfCostFunction(w):
2458 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2459 _GradJo = - EaHX.T @ (RI * _A)
2460 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2461 _GradJ = _GradJo + _GradJb
2462 return numpy.ravel(_GradJ)
2463 vw = scipy.optimize.fmin_cg(
2465 x0 = numpy.zeros(__m),
2466 fprime = GradientOfCostFunction,
2471 Hto = EaHX.T @ (RI * EaHX)
2473 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2474 / (1 + 1/__m + vw.T @ vw)**2
2477 Pta = numpy.linalg.inv( Hta )
2478 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2480 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2481 #--------------------------
2482 elif VariantM == "FiniteSize15": # Gauge Boc2015
2483 HXfm = H((Xfm[:,None], Un)) # Could also be Hfm
2484 def CostFunction(w):
2485 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2486 _Jo = 0.5 * _A.T @ (RI * _A)
2487 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2490 def GradientOfCostFunction(w):
2491 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2492 _GradJo = - EaHX.T @ (RI * _A)
2493 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2494 _GradJ = _GradJo + _GradJb
2495 return numpy.ravel(_GradJ)
2496 vw = scipy.optimize.fmin_cg(
2498 x0 = numpy.zeros(__m),
2499 fprime = GradientOfCostFunction,
2504 Hto = EaHX.T @ (RI * EaHX)
2506 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2507 / (1 + 1/__m + vw.T @ vw)**2
2510 Pta = numpy.linalg.inv( Hta )
2511 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2513 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2514 #--------------------------
2515 elif VariantM == "FiniteSize16": # Gauge Boc2016
2516 HXfm = H((Xfm[:,None], Un)) # Could also be Hfm
2517 def CostFunction(w):
2518 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2519 _Jo = 0.5 * _A.T @ (RI * _A)
2520 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2523 def GradientOfCostFunction(w):
2524 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2525 _GradJo = - EaHX.T @ (RI * _A)
2526 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2527 _GradJ = _GradJo + _GradJb
2528 return numpy.ravel(_GradJ)
2529 vw = scipy.optimize.fmin_cg(
2531 x0 = numpy.zeros(__m),
2532 fprime = GradientOfCostFunction,
2537 Hto = EaHX.T @ (RI * EaHX)
2538 Htb = ((__m+1) / (__m-1)) * \
2539 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2540 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2543 Pta = numpy.linalg.inv( Hta )
2544 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2546 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2547 #--------------------------
2549 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2551 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2552 Xn = CovarianceInflation( Xn,
2553 selfA._parameters["InflationType"],
2554 selfA._parameters["InflationFactor"],
2557 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2558 #--------------------------
2560 if selfA._parameters["StoreInternalVariables"] \
2561 or selfA._toStore("CostFunctionJ") \
2562 or selfA._toStore("CostFunctionJb") \
2563 or selfA._toStore("CostFunctionJo") \
2564 or selfA._toStore("APosterioriCovariance") \
2565 or selfA._toStore("InnovationAtCurrentAnalysis") \
2566 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2567 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2568 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2569 _Innovation = Ynpu - _HXa
2571 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2572 # ---> with analysis
2573 selfA.StoredVariables["Analysis"].store( Xa )
2574 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2575 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2576 if selfA._toStore("InnovationAtCurrentAnalysis"):
2577 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2578 # ---> with current state
2579 if selfA._parameters["StoreInternalVariables"] \
2580 or selfA._toStore("CurrentState"):
2581 selfA.StoredVariables["CurrentState"].store( Xn )
2582 if selfA._toStore("ForecastState"):
2583 selfA.StoredVariables["ForecastState"].store( EMX )
2584 if selfA._toStore("BMA"):
2585 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2586 if selfA._toStore("InnovationAtCurrentState"):
2587 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2588 if selfA._toStore("SimulatedObservationAtCurrentState") \
2589 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2590 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2592 if selfA._parameters["StoreInternalVariables"] \
2593 or selfA._toStore("CostFunctionJ") \
2594 or selfA._toStore("CostFunctionJb") \
2595 or selfA._toStore("CostFunctionJo") \
2596 or selfA._toStore("CurrentOptimum") \
2597 or selfA._toStore("APosterioriCovariance"):
2598 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2599 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2601 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2602 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2603 selfA.StoredVariables["CostFunctionJ" ].store( J )
2605 if selfA._toStore("IndexOfOptimum") \
2606 or selfA._toStore("CurrentOptimum") \
2607 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2608 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2609 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2610 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2611 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2612 if selfA._toStore("IndexOfOptimum"):
2613 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2614 if selfA._toStore("CurrentOptimum"):
2615 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2616 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2617 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2618 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2619 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2620 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2621 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2622 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2623 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2624 if selfA._toStore("APosterioriCovariance"):
2625 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2627 Pn = 0.5 * (Pn + Pn.T)
2628 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2629 if selfA._parameters["EstimationOf"] == "Parameters" \
2630 and J < previousJMinimum:
2631 previousJMinimum = J
2633 if selfA._toStore("APosterioriCovariance"):
2634 covarianceXaMin = Pn
2636 # Additional final storage of the optimum in parameter estimation
2637 # ----------------------------------------------------------------------
2638 if selfA._parameters["EstimationOf"] == "Parameters":
2639 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2640 selfA.StoredVariables["Analysis"].store( XaMin )
2641 if selfA._toStore("APosterioriCovariance"):
2642 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2643 if selfA._toStore("BMA"):
2644 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2648 # ==============================================================================
2649 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2650 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2652 Maximum Likelihood Ensemble Filter
2654 if selfA._parameters["EstimationOf"] == "Parameters":
2655 selfA._parameters["StoreInternalVariables"] = True
2659 H = HO["Direct"].appliedControledFormTo
2661 if selfA._parameters["EstimationOf"] == "State":
2662 M = EM["Direct"].appliedControledFormTo
2664 if CM is not None and "Tangent" in CM and U is not None:
2665 Cm = CM["Tangent"].asMatrix(Xb)
2669 # Number of steps identical to the number of observation steps
2670 # -------------------------------------------------------
2671 if hasattr(Y,"stepnumber"):
2672 duration = Y.stepnumber()
2673 __p = numpy.cumprod(Y.shape())[-1]
2676 __p = numpy.array(Y).size
2678 # Precomputation of the inverses of B and R
2679 # ----------------------------------
2680 if selfA._parameters["StoreInternalVariables"] \
2681 or selfA._toStore("CostFunctionJ") \
2682 or selfA._toStore("CostFunctionJb") \
2683 or selfA._toStore("CostFunctionJo") \
2684 or selfA._toStore("CurrentOptimum") \
2685 or selfA._toStore("APosterioriCovariance"):
2692 __m = selfA._parameters["NumberOfMembers"]
2693 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2695 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2697 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2699 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2701 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2702 selfA.StoredVariables["Analysis"].store( Xb )
2703 if selfA._toStore("APosterioriCovariance"):
2704 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2707 previousJMinimum = numpy.finfo(float).max
2709 for step in range(duration-1):
2710 if hasattr(Y,"store"):
2711 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2713 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2716 if hasattr(U,"store") and len(U)>1:
2717 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2718 elif hasattr(U,"store") and len(U)==1:
2719 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2721 Un = numpy.asmatrix(numpy.ravel( U )).T
2725 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2726 Xn = CovarianceInflation( Xn,
2727 selfA._parameters["InflationType"],
2728 selfA._parameters["InflationFactor"],
2731 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2732 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2734 returnSerieAsArrayMatrix = True )
2735 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2736 Xn_predicted = EMX + qi
2737 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2738 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2739 Xn_predicted = Xn_predicted + Cm * Un
2740 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2741 # --- > By principle, M = Id, Q = 0
2744 #--------------------------
2745 if VariantM == "MLEF13":
2746 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2747 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2753 vw = numpy.zeros(__m)
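# Gauss-Newton iterations in ensemble space (MLEF): with the scaled anomalies
# EaX = X'/sqrt(m-1), one seeks the weight vector vw minimizing
#   J(w) = 0.5 w^T w + 0.5 (Ynpu - H(Xfm + EaX w))^T R^-1 (Ynpu - H(Xfm + EaX w));
# at each iteration the observation anomalies EaY are re-evaluated around the
# current state vx1 (finite-difference bundle if BnotT, transform Ta otherwise)
# and the Newton increment Deltaw = -(I + EaY^T R^-1 EaY)^-1 grad J is applied.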
2754 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2755 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2758 E1 = vx1 + _epsilon * EaX
2760 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2762 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2764 returnSerieAsArrayMatrix = True )
2765 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2768 EaY = (HE2 - vy2) / _epsilon
2770 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2772 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2773 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2774 Deltaw = - numpy.linalg.solve(mH,GradJ)
2779 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2784 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2786 Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2787 #--------------------------
2789 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2791 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2792 Xn = CovarianceInflation( Xn,
2793 selfA._parameters["InflationType"],
2794 selfA._parameters["InflationFactor"],
2797 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2798 #--------------------------
2800 if selfA._parameters["StoreInternalVariables"] \
2801 or selfA._toStore("CostFunctionJ") \
2802 or selfA._toStore("CostFunctionJb") \
2803 or selfA._toStore("CostFunctionJo") \
2804 or selfA._toStore("APosterioriCovariance") \
2805 or selfA._toStore("InnovationAtCurrentAnalysis") \
2806 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2807 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2808 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2809 _Innovation = Ynpu - _HXa
2811 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2812 # ---> with analysis
2813 selfA.StoredVariables["Analysis"].store( Xa )
2814 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2815 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2816 if selfA._toStore("InnovationAtCurrentAnalysis"):
2817 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2818 # ---> with current state
2819 if selfA._parameters["StoreInternalVariables"] \
2820 or selfA._toStore("CurrentState"):
2821 selfA.StoredVariables["CurrentState"].store( Xn )
2822 if selfA._toStore("ForecastState"):
2823 selfA.StoredVariables["ForecastState"].store( EMX )
2824 if selfA._toStore("BMA"):
2825 selfA.StoredVariables["BMA"].store( EMX - Xa )
2826 if selfA._toStore("InnovationAtCurrentState"):
2827 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2828 if selfA._toStore("SimulatedObservationAtCurrentState") \
2829 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2830 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2832 if selfA._parameters["StoreInternalVariables"] \
2833 or selfA._toStore("CostFunctionJ") \
2834 or selfA._toStore("CostFunctionJb") \
2835 or selfA._toStore("CostFunctionJo") \
2836 or selfA._toStore("CurrentOptimum") \
2837 or selfA._toStore("APosterioriCovariance"):
2838 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2839 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2841 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2842 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2843 selfA.StoredVariables["CostFunctionJ" ].store( J )
2845 if selfA._toStore("IndexOfOptimum") \
2846 or selfA._toStore("CurrentOptimum") \
2847 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2848 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2849 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2850 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2851 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2852 if selfA._toStore("IndexOfOptimum"):
2853 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2854 if selfA._toStore("CurrentOptimum"):
2855 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2856 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2857 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2858 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2859 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2860 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2861 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2862 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2863 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2864 if selfA._toStore("APosterioriCovariance"):
2865 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2867 Pn = 0.5 * (Pn + Pn.T)
2868 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2869 if selfA._parameters["EstimationOf"] == "Parameters" \
2870 and J < previousJMinimum:
2871 previousJMinimum = J
2873 if selfA._toStore("APosterioriCovariance"):
2874 covarianceXaMin = Pn
2876 # Additional final storage of the optimum in parameter estimation
2877 # ----------------------------------------------------------------------
2878 if selfA._parameters["EstimationOf"] == "Parameters":
2879 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2880 selfA.StoredVariables["Analysis"].store( XaMin )
2881 if selfA._toStore("APosterioriCovariance"):
2882 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2883 if selfA._toStore("BMA"):
2884 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2888 # ==============================================================================
2889 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2890 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2894 if selfA._parameters["EstimationOf"] == "Parameters":
2895 selfA._parameters["StoreInternalVariables"] = True
2899 H = HO["Direct"].appliedControledFormTo
2901 if selfA._parameters["EstimationOf"] == "State":
2902 M = EM["Direct"].appliedControledFormTo
2904 if CM is not None and "Tangent" in CM and U is not None:
2905 Cm = CM["Tangent"].asMatrix(Xb)
2909 # Number of steps identical to the number of observation steps
2910 # -------------------------------------------------------
2911 if hasattr(Y,"stepnumber"):
2912 duration = Y.stepnumber()
2913 __p = numpy.cumprod(Y.shape())[-1]
2916 __p = numpy.array(Y).size
2918 # Precomputation of the inverses of B and R
2919 # ----------------------------------
2920 if selfA._parameters["StoreInternalVariables"] \
2921 or selfA._toStore("CostFunctionJ") \
2922 or selfA._toStore("CostFunctionJb") \
2923 or selfA._toStore("CostFunctionJo") \
2924 or selfA._toStore("CurrentOptimum") \
2925 or selfA._toStore("APosterioriCovariance"):
2932 __m = selfA._parameters["NumberOfMembers"]
2933 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2935 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2937 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2939 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2941 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2942 selfA.StoredVariables["Analysis"].store( Xb )
2943 if selfA._toStore("APosterioriCovariance"):
2944 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2947 previousJMinimum = numpy.finfo(float).max
2949 for step in range(duration-1):
2950 if hasattr(Y,"store"):
2951 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2953 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2956 if hasattr(U,"store") and len(U)>1:
2957 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2958 elif hasattr(U,"store") and len(U)==1:
2959 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2961 Un = numpy.asmatrix(numpy.ravel( U )).T
2965 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2966 Xn = CovarianceInflation( Xn,
2967 selfA._parameters["InflationType"],
2968 selfA._parameters["InflationFactor"],
)
2971 #--------------------------
2972 if VariantM == "IEnKF12":
2973 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2974 EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
__j = 0
Deltaw = 1
if not BnotT:
Ta = numpy.identity(__m)
2979 vw = numpy.zeros(__m)
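# Newton-type iterations in ensemble space: vw holds the coordinates of the state
# estimate in the basis of the background anomalies EaX, Ta is the anomaly transform
# matrix, and the loop stops when the increment Deltaw is small enough or _jmax is reached.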
2980 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2981 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
if BnotT:
2984 E1 = vx1 + _epsilon * EaX
else:
2986 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2988 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2989 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
2991 returnSerieAsArrayMatrix = True )
2992 elif selfA._parameters["EstimationOf"] == "Parameters":
2993 # --- > By principle, M = Id
E2 = Xn
2995 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2996 vy1 = H((vx2, Un)).reshape((__p,1))
2998 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
argsAsSerie = True,
3000 returnSerieAsArrayMatrix = True )
3001 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
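# vy2 is the mean of the observed ensemble HE2; the observed anomalies EaY below are
# rescaled according to the bundle (BnotT) or transform variant of the scheme.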
if BnotT:
3004 EaY = (HE2 - vy2) / _epsilon
else:
3006 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
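# Gradient and approximate Hessian of the cost function with respect to vw, followed
# by the Newton increment Deltaw obtained by solving mH @ Deltaw = -GradJ.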
3008 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
3009 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
3010 Deltaw = - numpy.linalg.solve(mH,GradJ)
vw = vw + Deltaw
if not BnotT:
3015 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
__j = __j + 1
3019 A2 = EnsembleOfAnomalies( E2 )
if BnotT:
3022 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
3023 A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
Xn = vx2 + A2
3026 #--------------------------
else:
3028 raise ValueError("VariantM has to be chosen in the authorized methods list.")
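# After convergence: optional inflation of the analysis anomalies, then the analysis
# mean Xa is taken as the ensemble mean of Xn.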
3030 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3031 Xn = CovarianceInflation( Xn,
3032 selfA._parameters["InflationType"],
3033 selfA._parameters["InflationFactor"],
)
3036 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
3037 #--------------------------
3039 if selfA._parameters["StoreInternalVariables"] \
3040 or selfA._toStore("CostFunctionJ") \
3041 or selfA._toStore("CostFunctionJb") \
3042 or selfA._toStore("CostFunctionJo") \
3043 or selfA._toStore("APosterioriCovariance") \
3044 or selfA._toStore("InnovationAtCurrentAnalysis") \
3045 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3046 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3047 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
3048 _Innovation = Ynpu - _HXa
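# Storage of the quantities requested by the user for this step: analysis, innovation,
# current/forecast states, cost functions, running optimum and a posteriori covariance.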
3050 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3051 # ---> with analysis
3052 selfA.StoredVariables["Analysis"].store( Xa )
3053 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3054 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3055 if selfA._toStore("InnovationAtCurrentAnalysis"):
3056 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3057 # ---> with current state
3058 if selfA._parameters["StoreInternalVariables"] \
3059 or selfA._toStore("CurrentState"):
3060 selfA.StoredVariables["CurrentState"].store( Xn )
3061 if selfA._toStore("ForecastState"):
3062 selfA.StoredVariables["ForecastState"].store( E2 )
3063 if selfA._toStore("BMA"):
3064 selfA.StoredVariables["BMA"].store( E2 - Xa )
3065 if selfA._toStore("InnovationAtCurrentState"):
3066 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
3067 if selfA._toStore("SimulatedObservationAtCurrentState") \
3068 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3069 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
3071 if selfA._parameters["StoreInternalVariables"] \
3072 or selfA._toStore("CostFunctionJ") \
3073 or selfA._toStore("CostFunctionJb") \
3074 or selfA._toStore("CostFunctionJo") \
3075 or selfA._toStore("CurrentOptimum") \
3076 or selfA._toStore("APosterioriCovariance"):
3077 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
3078 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
J = Jb + Jo
3080 selfA.StoredVariables["CostFunctionJb"].store( Jb )
3081 selfA.StoredVariables["CostFunctionJo"].store( Jo )
3082 selfA.StoredVariables["CostFunctionJ" ].store( J )
3084 if selfA._toStore("IndexOfOptimum") \
3085 or selfA._toStore("CurrentOptimum") \
3086 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3087 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3088 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3089 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3090 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3091 if selfA._toStore("IndexOfOptimum"):
3092 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3093 if selfA._toStore("CurrentOptimum"):
3094 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3095 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3096 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3097 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3098 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3099 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3100 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3101 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3102 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3103 if selfA._toStore("APosterioriCovariance"):
3104 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
Pn = Eai @ Eai.T
3106 Pn = 0.5 * (Pn + Pn.T)
3107 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
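# For parameter estimation, keep track of the best analysis (smallest cost J) seen
# so far; it is stored once more after the time loop.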
3108 if selfA._parameters["EstimationOf"] == "Parameters" \
3109 and J < previousJMinimum:
3110 previousJMinimum = J
XaMin = Xa
3112 if selfA._toStore("APosterioriCovariance"):
3113 covarianceXaMin = Pn
3115 # Additional final storage of the optimum for parameter estimation
3116 # ----------------------------------------------------------------------
3117 if selfA._parameters["EstimationOf"] == "Parameters":
3118 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3119 selfA.StoredVariables["Analysis"].store( XaMin )
3120 if selfA._toStore("APosterioriCovariance"):
3121 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3122 if selfA._toStore("BMA"):
3123 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
return 0
3127 # ==============================================================================
3128 if __name__ == "__main__":
3129 print('\n AUTODIAGNOSTIC\n')