1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( paire ):
    assert len(paire) == 2, "Incorrect number of arguments"
    X, funcrepr = paire
    __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 __HX = __fonction( __X )
46 return numpy.ravel( __HX )
48 # ==============================================================================
49 class FDApproximation(object):
    """
    This class provides an interface to define approximated operators. When an
    object is created with a user function "Function", it exposes the three
    methods "DirectOperator", "TangentOperator" and "AdjointOperator". The
    finite-difference (FD) approximation is controlled either by the
    multiplicative increment "increment", equal to 1% by default, or by the
    fixed increment "dX", which is multiplied by "increment" (hence in %), and
    centered finite differences are used when the boolean "centeredDF" is true.
    """
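    #
    # Minimal usage sketch (illustrative only; "H" stands for an assumed user
    # function mapping a state vector to an observation vector):
    #
    #   FDA  = FDApproximation( name = "H", Function = H, increment = 0.01 )
    #   HX   = FDA.DirectOperator( X )            # direct value H(X)
    #   TM   = FDA.TangentMatrix( X )             # finite-difference Jacobian of H at X
    #   HtdX = FDA.TangentOperator( (X, dX) )     # tangent of H at X applied to dX
    #   HaY  = FDA.AdjointOperator( (X, Y) )      # adjoint of H at X applied to Y
    #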
60 name = "FDApproximation",
65 avoidingRedundancy = True,
66 toleranceInRedundancy = 1.e-18,
67 lenghtOfRedundancy = -1,
72 self.__name = str(name)
75 import multiprocessing
76 self.__mpEnabled = True
78 self.__mpEnabled = False
80 self.__mpEnabled = False
81 self.__mpWorkers = mpWorkers
82 if self.__mpWorkers is not None and self.__mpWorkers < 1:
83 self.__mpWorkers = None
84 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
87 self.__mfEnabled = True
89 self.__mfEnabled = False
90 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
92 if avoidingRedundancy:
94 self.__tolerBP = float(toleranceInRedundancy)
95 self.__lenghtRJ = int(lenghtOfRedundancy)
96 self.__listJPCP = [] # Jacobian Previous Calculated Points
97 self.__listJPCI = [] # Jacobian Previous Calculated Increment
98 self.__listJPCR = [] # Jacobian Previous Calculated Results
99 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
100 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
102 self.__avoidRC = False
105 if isinstance(Function,types.FunctionType):
106 logging.debug("FDA Calculs en multiprocessing : FunctionType")
107 self.__userFunction__name = Function.__name__
109 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
111 mod = os.path.abspath(Function.__globals__['__file__'])
112 if not os.path.isfile(mod):
113 raise ImportError("No user defined function or method found with the name %s"%(mod,))
114 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
115 self.__userFunction__path = os.path.dirname(mod)
117 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo # For the direct computation
119 elif isinstance(Function,types.MethodType):
120 logging.debug("FDA Calculs en multiprocessing : MethodType")
121 self.__userFunction__name = Function.__name__
123 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
125 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
126 if not os.path.isfile(mod):
127 raise ImportError("No user defined function or method found with the name %s"%(mod,))
128 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
129 self.__userFunction__path = os.path.dirname(mod)
131 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
            self.__userFunction = self.__userOperator.appliedTo # For the direct computation
134 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
137 self.__userFunction = self.__userOperator.appliedTo
139 self.__centeredDF = bool(centeredDF)
140 if abs(float(increment)) > 1.e-15:
141 self.__increment = float(increment)
143 self.__increment = 0.01
147 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
148 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
150 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
152 # ---------------------------------------------------------
153 def __doublon__(self, e, l, n, v=None):
154 __ac, __iac = False, -1
155 for i in range(len(l)-1,-1,-1):
156 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
157 __ac, __iac = True, i
158 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
162 # ---------------------------------------------------------
163 def DirectOperator(self, X ):
        """
        Direct computation using the supplied function.
        """
167 logging.debug("FDA Calcul DirectOperator (explicite)")
169 _HX = self.__userFunction( X, argsAsSerie = True )
171 _X = numpy.asmatrix(numpy.ravel( X )).T
172 _HX = numpy.ravel(self.__userFunction( _X ))
176 # ---------------------------------------------------------
177 def TangentMatrix(self, X ):
        """
        Computes the tangent operator as the Jacobian matrix obtained by finite
        differences, that is, the gradient of H at X. Directional finite
        differences are used around the point X. X is a numpy.matrix.

        Centered finite differences (2nd order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i] to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
           the step 2*dXi
        3/ Each result, component by component, becomes a column of the Jacobian

        Non-centered finite differences (1st order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are formed and divided by the step dXi
        4/ Each result, component by component, becomes a column of the Jacobian
        """
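        #
        # Standalone sketch of the centered scheme above (illustrative only, and
        # independent of the caching and multiprocessing logic used below):
        #
        #   def fd_jacobian_centered(H, X, dX):
        #       columns = []
        #       for i in range(len(X)):
        #           Xp = numpy.array(X, dtype=float) ; Xp[i] = X[i] + dX[i]
        #           Xm = numpy.array(X, dtype=float) ; Xm[i] = X[i] - dX[i]
        #           columns.append( (numpy.ravel(H(Xp)) - numpy.ravel(H(Xm))) / (2. * dX[i]) )
        #       return numpy.vstack(columns).T   # one column per component of X
        #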
202 logging.debug("FDA Début du calcul de la Jacobienne")
203 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
204 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
206 if X is None or len(X)==0:
207 raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
209 _X = numpy.asmatrix(numpy.ravel( X )).T
211 if self.__dX is None:
212 _dX = self.__increment * _X
214 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
223 __alreadyCalculated = False
225 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
226 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
227 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
228 __alreadyCalculated, __i = True, __alreadyCalculatedP
229 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
231 if __alreadyCalculated:
232 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
233 _Jacobienne = self.__listJPCR[__i]
235 logging.debug("FDA Calcul Jacobienne (explicite)")
236 if self.__centeredDF:
238 if self.__mpEnabled and not self.__mfEnabled:
240 "__userFunction__path" : self.__userFunction__path,
241 "__userFunction__modl" : self.__userFunction__modl,
242 "__userFunction__name" : self.__userFunction__name,
245 for i in range( len(_dX) ):
247 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
248 _X_plus_dXi[i] = _X[i] + _dXi
249 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
250 _X_moins_dXi[i] = _X[i] - _dXi
252 _jobs.append( (_X_plus_dXi, funcrepr) )
253 _jobs.append( (_X_moins_dXi, funcrepr) )
255 import multiprocessing
256 self.__pool = multiprocessing.Pool(self.__mpWorkers)
257 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
262 for i in range( len(_dX) ):
263 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
265 elif self.__mfEnabled:
267 for i in range( len(_dX) ):
269 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
270 _X_plus_dXi[i] = _X[i] + _dXi
271 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
272 _X_moins_dXi[i] = _X[i] - _dXi
274 _xserie.append( _X_plus_dXi )
275 _xserie.append( _X_moins_dXi )
277 _HX_plusmoins_dX = self.DirectOperator( _xserie )
280 for i in range( len(_dX) ):
281 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
285 for i in range( _dX.size ):
287 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
288 _X_plus_dXi[i] = _X[i] + _dXi
289 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
290 _X_moins_dXi[i] = _X[i] - _dXi
292 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
293 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
295 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
299 if self.__mpEnabled and not self.__mfEnabled:
301 "__userFunction__path" : self.__userFunction__path,
302 "__userFunction__modl" : self.__userFunction__modl,
303 "__userFunction__name" : self.__userFunction__name,
306 _jobs.append( (_X.A1, funcrepr) )
307 for i in range( len(_dX) ):
308 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
309 _X_plus_dXi[i] = _X[i] + _dX[i]
311 _jobs.append( (_X_plus_dXi, funcrepr) )
313 import multiprocessing
314 self.__pool = multiprocessing.Pool(self.__mpWorkers)
315 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
319 _HX = _HX_plus_dX.pop(0)
322 for i in range( len(_dX) ):
323 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
325 elif self.__mfEnabled:
327 _xserie.append( _X.A1 )
328 for i in range( len(_dX) ):
329 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
330 _X_plus_dXi[i] = _X[i] + _dX[i]
332 _xserie.append( _X_plus_dXi )
334 _HX_plus_dX = self.DirectOperator( _xserie )
336 _HX = _HX_plus_dX.pop(0)
339 for i in range( len(_dX) ):
340 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
344 _HX = self.DirectOperator( _X )
345 for i in range( _dX.size ):
347 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
348 _X_plus_dXi[i] = _X[i] + _dXi
350 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
352 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
355 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
357 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
358 while len(self.__listJPCP) > self.__lenghtRJ:
359 self.__listJPCP.pop(0)
360 self.__listJPCI.pop(0)
361 self.__listJPCR.pop(0)
362 self.__listJPPN.pop(0)
363 self.__listJPIN.pop(0)
364 self.__listJPCP.append( copy.copy(_X) )
365 self.__listJPCI.append( copy.copy(_dX) )
366 self.__listJPCR.append( copy.copy(_Jacobienne) )
367 self.__listJPPN.append( numpy.linalg.norm(_X) )
            self.__listJPIN.append( numpy.linalg.norm(_dX) ) # Increment norm, consistent with its use in __doublon__
370 logging.debug("FDA Fin du calcul de la Jacobienne")
374 # ---------------------------------------------------------
375 def TangentOperator(self, paire ):
        """
        Tangent computation using the Jacobian matrix.
        """
            assert len(paire) == 1, "Incorrect length of arguments"
382 assert len(_paire) == 2, "Incorrect number of arguments"
384 assert len(paire) == 2, "Incorrect number of arguments"
387 _Jacobienne = self.TangentMatrix( X )
388 if dX is None or len(dX) == 0:
            # Computation of the matrix form when the second argument is None
            # ----------------------------------------------------------------
392 if self.__mfEnabled: return [_Jacobienne,]
393 else: return _Jacobienne
            # Computation of the linearised value of H at X applied to dX
            # ------------------------------------------------------------
398 _dX = numpy.asmatrix(numpy.ravel( dX )).T
399 _HtX = numpy.dot(_Jacobienne, _dX)
400 if self.__mfEnabled: return [_HtX.A1,]
403 # ---------------------------------------------------------
404 def AdjointOperator(self, paire ):
        """
        Adjoint computation using the Jacobian matrix.
        """
            assert len(paire) == 1, "Incorrect length of arguments"
411 assert len(_paire) == 2, "Incorrect number of arguments"
413 assert len(paire) == 2, "Incorrect number of arguments"
416 _JacobienneT = self.TangentMatrix( X ).T
417 if Y is None or len(Y) == 0:
            # Computation of the matrix form when the second argument is None
            # ----------------------------------------------------------------
421 if self.__mfEnabled: return [_JacobienneT,]
422 else: return _JacobienneT
            # Computation of the adjoint value at X applied to Y
            # ---------------------------------------------------
427 _Y = numpy.asmatrix(numpy.ravel( Y )).T
428 _HaY = numpy.dot(_JacobienneT, _Y)
429 if self.__mfEnabled: return [_HaY.A1,]
432 # ==============================================================================
def mmqr(func=None, x0=None, fprime=None, bounds=None, quantile=0.5, maxfun=15000, toler=1.e-06, y=None):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
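    #
    # Quantile regression minimises the "check" (pinball) loss of the residuals
    # r = y - func(x), namely sum_i r_i * (quantile - 1_{r_i < 0}); the MM scheme
    # below majorises this loss by a weighted least-squares surrogate which is
    # decreased at each iteration (see the cited publication).
    #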
    # Retrieval of the initial data and information
    # ----------------------------------------------
451 variables = numpy.ravel( x0 )
452 mesures = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    # Computation of the MM parameters
    # --------------------------------
460 tn = float(toler) / n
461 e0 = -tn / math.log(tn)
462 epsilon = (e0-tn)/(1+math.log(e0))
    # Initialisation computations
    # ---------------------------
466 residus = mesures - numpy.ravel( func( variables ) )
467 poids = 1./(epsilon+numpy.abs(residus))
468 veps = 1. - 2. * quantile - residus * poids
469 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    # Iterative search
    # ----------------
    iteration = 0
    while (increment > toler) and (iteration < maxfun):
        iteration += 1
477 Derivees = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Needed to restore the matrix shape if it goes through YACS pipes
479 DeriveesT = Derivees.transpose()
480 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
481 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
482 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
484 variables = variables + step
485 if bounds is not None:
            # Caution: avoid an infinite loop if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
490 residus = mesures - numpy.ravel( func(variables) )
491 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
496 residus = mesures - numpy.ravel( func(variables) )
497 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
499 increment = lastsurrogate-surrogate
500 poids = 1./(epsilon+numpy.abs(residus))
501 veps = 1. - 2. * quantile - residus * poids
502 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
506 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
508 return variables, Ecart, [n,p,iteration,increment,0]
510 # ==============================================================================
511 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
512 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
514 _bgcenter = numpy.ravel(_bgcenter)[:,None]
516 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
518 if _bgcovariance is None:
519 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
521 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
522 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
524 return BackgroundEnsemble
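# Example (illustrative): an ensemble of 10 states drawn around a background Xb
# with covariance B, returned as a matrix of shape (Xb.size, 10):
#   E = EnsembleOfCenteredPerturbations( Xb, B, 10 )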
526 # ==============================================================================
527 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
528 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
529 def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centred on Zr, following the
        handwritten notes of MB and consistent with the code of PS, with eps = -1
        """
535 Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
536 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
537 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
542 _bgcenter = numpy.ravel(_bgcenter)[:,None]
544 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
545 if _bgcovariance is None:
546 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
549 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
550 _nbctl = _bgcenter.size
551 if _nbmembers > _nbctl:
552 _Z = numpy.concatenate((numpy.dot(
553 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
554 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
556 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
557 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
558 BackgroundEnsemble = _bgcenter + _Zca
560 if max(abs(_bgcovariance.flatten())) > 0:
561 _nbctl = _bgcenter.size
562 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
563 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
564 BackgroundEnsemble = _bgcenter + _Zca
566 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
568 return BackgroundEnsemble
570 # ==============================================================================
571 def EnsembleOfAnomalies( _ensemble, _optmean = None):
572 "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
574 Em = numpy.asarray(_ensemble).mean(axis=1, dtype=mfp).astype('float')[:,numpy.newaxis]
576 Em = numpy.ravel(_optmean)[:,numpy.newaxis]
578 return numpy.asarray(_ensemble) - Em
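# Example (illustrative): for an ensemble E of shape (StateSize, NbMembers),
# EnsembleOfAnomalies(E) returns E minus its member-wise mean, so the anomalies
# sum to zero along the member axis (unless another mean is imposed via _optmean).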
580 # ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
584 InflationFactor = None,
585 BackgroundCov = None,
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
592 if InflationFactor is None:
595 InflationFactor = float(InflationFactor)
597 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
598 if InflationFactor < 1.:
599 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
600 if InflationFactor < 1.+mpr:
602 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
604 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
605 if InflationFactor < 1.:
606 raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
607 if InflationFactor < 1.+mpr:
609 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
610 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
611 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
613 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
614 if InflationFactor < 0.:
615 raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
616 if InflationFactor < mpr:
618 __n, __m = numpy.asarray(InputCovOrEns).shape
620 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
621 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.eye(__n)
623 elif InflationType == "HybridOnBackgroundCovariance":
624 if InflationFactor < 0.:
625 raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
626 if InflationFactor < mpr:
628 __n, __m = numpy.asarray(InputCovOrEns).shape
630 raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
631 if BackgroundCov is None:
632 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
633 if InputCovOrEns.shape != BackgroundCov.shape:
634 raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
635 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
637 elif InflationType == "Relaxation":
638 raise NotImplementedError("InflationType Relaxation")
641 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
643 return OutputCovOrEns
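# Example (illustrative): a 5% multiplicative inflation of the anomalies of an
# analysis ensemble EXa (one member per column):
#   EXa = CovarianceInflation( EXa, "MultiplicativeOnAnalysisAnomalies", 1.05 )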
645 # ==============================================================================
646 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Multi-step and multi-method 3DVAR
    """
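    # Sketch of the scheme implemented below: at each observation step, the previous
    # analysis is propagated by the evolution model M when a state is estimated
    # (whereas M = Id, Q = 0 for parameter estimation), then one 3DVAR cycle
    # (oneCycle) is run on the predicted state against the new observation Ynpu.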
653 Xn = numpy.ravel(Xb).reshape((-1,1))
655 if selfA._parameters["EstimationOf"] == "State":
656 M = EM["Direct"].appliedTo
658 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
659 selfA.StoredVariables["Analysis"].store( Xn )
660 if selfA._toStore("APosterioriCovariance"):
661 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
663 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
664 if selfA._toStore("ForecastState"):
665 selfA.StoredVariables["ForecastState"].store( Xn )
667 if hasattr(Y,"stepnumber"):
668 duration = Y.stepnumber()
674 for step in range(duration-1):
675 if hasattr(Y,"store"):
676 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
678 Ynpu = numpy.ravel( Y ).reshape((-1,1))
680 if selfA._parameters["EstimationOf"] == "State": # Forecast
681 Xn = selfA.StoredVariables["Analysis"][-1]
682 Xn_predicted = M( Xn )
683 if selfA._toStore("ForecastState"):
684 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
685 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # --- > By principle, M = Id, Q = 0
688 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
690 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
694 # ==============================================================================
695 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
704 Hm = HO["Direct"].appliedTo
705 Ha = HO["Adjoint"].appliedInXTo
    # Optional use of a precomputed H(Xb) vector
708 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
709 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
712 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
713 if Y.size != HXb.size:
714 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
715 if max(Y.shape) != max(HXb.shape):
716 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
718 if selfA._toStore("JacobianMatrixAtBackground"):
719 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
720 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
721 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    # Precomputation of the inverses of B and R
    # Starting point of the optimisation
728 Xini = selfA._parameters["InitializationPoint"]
    # Definition of the cost function
    # -------------------------------
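    # The standard 3DVAR cost function and gradient evaluated below are:
    #   J(x)      = 1/2 (x - Xb)^T B^{-1} (x - Xb) + 1/2 (Y - H(x))^T R^{-1} (Y - H(x))
    #   grad J(x) = B^{-1} (x - Xb) - H'(x)^T R^{-1} (Y - H(x))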
733 _X = numpy.asmatrix(numpy.ravel( x )).T
734 if selfA._parameters["StoreInternalVariables"] or \
735 selfA._toStore("CurrentState") or \
736 selfA._toStore("CurrentOptimum"):
737 selfA.StoredVariables["CurrentState"].store( _X )
739 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
740 _Innovation = Y - _HX
741 if selfA._toStore("SimulatedObservationAtCurrentState") or \
742 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
743 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
744 if selfA._toStore("InnovationAtCurrentState"):
745 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
747 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J  = Jb + Jo
751 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
752 selfA.StoredVariables["CostFunctionJb"].store( Jb )
753 selfA.StoredVariables["CostFunctionJo"].store( Jo )
754 selfA.StoredVariables["CostFunctionJ" ].store( J )
755 if selfA._toStore("IndexOfOptimum") or \
756 selfA._toStore("CurrentOptimum") or \
757 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
758 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
759 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
760 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
761 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
762 if selfA._toStore("IndexOfOptimum"):
763 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
764 if selfA._toStore("CurrentOptimum"):
765 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
766 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
767 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
768 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
769 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
770 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
771 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
772 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
773 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
776 def GradientOfCostFunction(x):
777 _X = numpy.asmatrix(numpy.ravel( x )).T
779 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
780 GradJb = BI * (_X - Xb)
781 GradJo = - Ha( (_X, RI * (Y - _HX)) )
782 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
    # Minimisation of the functional
    # ------------------------------
787 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
789 if selfA._parameters["Minimizer"] == "LBFGSB":
790 if "0.19" <= scipy.version.version <= "1.1.0":
791 import lbfgsbhlt as optimiseur
793 import scipy.optimize as optimiseur
794 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
797 fprime = GradientOfCostFunction,
799 bounds = selfA._parameters["Bounds"],
800 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
801 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
802 pgtol = selfA._parameters["ProjectedGradientTolerance"],
803 iprint = selfA._parameters["optiprint"],
805 nfeval = Informations['funcalls']
806 rc = Informations['warnflag']
807 elif selfA._parameters["Minimizer"] == "TNC":
808 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
811 fprime = GradientOfCostFunction,
813 bounds = selfA._parameters["Bounds"],
814 maxfun = selfA._parameters["MaximumNumberOfSteps"],
815 pgtol = selfA._parameters["ProjectedGradientTolerance"],
816 ftol = selfA._parameters["CostDecrementTolerance"],
817 messages = selfA._parameters["optmessages"],
819 elif selfA._parameters["Minimizer"] == "CG":
820 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
823 fprime = GradientOfCostFunction,
825 maxiter = selfA._parameters["MaximumNumberOfSteps"],
826 gtol = selfA._parameters["GradientNormTolerance"],
827 disp = selfA._parameters["optdisp"],
830 elif selfA._parameters["Minimizer"] == "NCG":
831 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
834 fprime = GradientOfCostFunction,
836 maxiter = selfA._parameters["MaximumNumberOfSteps"],
837 avextol = selfA._parameters["CostDecrementTolerance"],
838 disp = selfA._parameters["optdisp"],
841 elif selfA._parameters["Minimizer"] == "BFGS":
842 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
845 fprime = GradientOfCostFunction,
847 maxiter = selfA._parameters["MaximumNumberOfSteps"],
848 gtol = selfA._parameters["GradientNormTolerance"],
849 disp = selfA._parameters["optdisp"],
853 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
855 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
856 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    # Workaround for a TNC bug on the returned Minimum
    # -------------------------------------------------
860 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
861 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    # Retrieval of the analysis
    # -------------------------
865 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
867 selfA.StoredVariables["Analysis"].store( Xa )
869 if selfA._toStore("OMA") or \
870 selfA._toStore("SigmaObs2") or \
871 selfA._toStore("SimulationQuantiles") or \
872 selfA._toStore("SimulatedObservationAtOptimum"):
873 if selfA._toStore("SimulatedObservationAtCurrentState"):
874 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
875 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
876 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
    # Computation of the analysis covariance
    # --------------------------------------
882 if selfA._toStore("APosterioriCovariance") or \
883 selfA._toStore("SimulationQuantiles") or \
884 selfA._toStore("JacobianMatrixAtOptimum") or \
885 selfA._toStore("KalmanGainAtOptimum"):
886 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
887 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
888 if selfA._toStore("APosterioriCovariance") or \
889 selfA._toStore("SimulationQuantiles") or \
890 selfA._toStore("KalmanGainAtOptimum"):
891 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
892 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
893 if selfA._toStore("APosterioriCovariance") or \
894 selfA._toStore("SimulationQuantiles"):
898 _ee = numpy.matrix(numpy.zeros(nb)).T
900 _HtEE = numpy.dot(HtM,_ee)
901 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
902 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
903 HessienneI = numpy.matrix( HessienneI )
905 if min(A.shape) != max(A.shape):
906 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
907 if (numpy.diag(A) < 0).any():
908 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
911 L = numpy.linalg.cholesky( A )
913 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
914 if selfA._toStore("APosterioriCovariance"):
915 selfA.StoredVariables["APosterioriCovariance"].store( A )
916 if selfA._toStore("JacobianMatrixAtOptimum"):
917 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
918 if selfA._toStore("KalmanGainAtOptimum"):
919 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
920 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
921 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    # Additional computations and/or storage
    # --------------------------------------
925 if selfA._toStore("Innovation") or \
926 selfA._toStore("SigmaObs2") or \
927 selfA._toStore("MahalanobisConsistency") or \
928 selfA._toStore("OMB"):
930 if selfA._toStore("Innovation"):
931 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
932 if selfA._toStore("BMA"):
933 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
934 if selfA._toStore("OMA"):
935 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
936 if selfA._toStore("OMB"):
937 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
938 if selfA._toStore("SigmaObs2"):
939 TraceR = R.trace(Y.size)
940 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
941 if selfA._toStore("MahalanobisConsistency"):
942 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
943 if selfA._toStore("SimulationQuantiles"):
944 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
945 HXa = numpy.matrix(numpy.ravel( HXa )).T
947 for i in range(nech):
948 if selfA._parameters["SimulationForQuantiles"] == "Linear":
949 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
950 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
952 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
953 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
954 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
958 YfQ = numpy.hstack((YfQ,Yr))
961 for quantile in selfA._parameters["Quantiles"]:
962 if not (0. <= float(quantile) <= 1.): continue
963 indice = int(nech * float(quantile) - 1./nech)
964 if YQ is None: YQ = YfQ[:,indice]
965 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
966 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
967 if selfA._toStore("SimulatedObservationAtBackground"):
968 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
969 if selfA._toStore("SimulatedObservationAtOptimum"):
970 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
974 # ==============================================================================
975 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
984 Hm = HO["Direct"].appliedTo
985 Ha = HO["Adjoint"].appliedInXTo
    # Precomputation of the inverses of B and R
    # Starting point of the optimisation
992 Xini = numpy.zeros(Xb.shape)
    # Definition of the cost function
    # -------------------------------
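    # Preconditioned formulation: with the change of variable x = Xb + B v, the
    # background term needs no inverse of B and the cost evaluated below is
    #   J(v) = 1/2 v^T B v + 1/2 (Y - H(Xb + B v))^T R^{-1} (Y - H(Xb + B v))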
997 _V = numpy.asmatrix(numpy.ravel( v )).T
999 if selfA._parameters["StoreInternalVariables"] or \
1000 selfA._toStore("CurrentState") or \
1001 selfA._toStore("CurrentOptimum"):
1002 selfA.StoredVariables["CurrentState"].store( _X )
1004 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1005 _Innovation = Y - _HX
1006 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1007 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1008 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
1009 if selfA._toStore("InnovationAtCurrentState"):
1010 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1012 Jb = float( 0.5 * _V.T * BT * _V )
        Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
        J  = Jb + Jo
1016 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1017 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1018 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1019 selfA.StoredVariables["CostFunctionJ" ].store( J )
1020 if selfA._toStore("IndexOfOptimum") or \
1021 selfA._toStore("CurrentOptimum") or \
1022 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1023 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1024 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1025 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1026 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1027 if selfA._toStore("IndexOfOptimum"):
1028 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1029 if selfA._toStore("CurrentOptimum"):
1030 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1031 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1032 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1033 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1034 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1035 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1036 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1037 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1038 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1041 def GradientOfCostFunction(v):
1042 _V = numpy.asmatrix(numpy.ravel( v )).T
1045 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
1047 GradJo = - Ha( (_X, RI * (Y - _HX)) )
1048 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
    # Minimisation of the functional
    # ------------------------------
1053 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1055 if selfA._parameters["Minimizer"] == "LBFGSB":
1056 if "0.19" <= scipy.version.version <= "1.1.0":
1057 import lbfgsbhlt as optimiseur
1059 import scipy.optimize as optimiseur
1060 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1061 func = CostFunction,
1063 fprime = GradientOfCostFunction,
1065 bounds = selfA._parameters["Bounds"],
1066 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1067 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1068 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1069 iprint = selfA._parameters["optiprint"],
1071 nfeval = Informations['funcalls']
1072 rc = Informations['warnflag']
1073 elif selfA._parameters["Minimizer"] == "TNC":
1074 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1075 func = CostFunction,
1077 fprime = GradientOfCostFunction,
1079 bounds = selfA._parameters["Bounds"],
1080 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1081 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1082 ftol = selfA._parameters["CostDecrementTolerance"],
1083 messages = selfA._parameters["optmessages"],
1085 elif selfA._parameters["Minimizer"] == "CG":
1086 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1089 fprime = GradientOfCostFunction,
1091 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1092 gtol = selfA._parameters["GradientNormTolerance"],
1093 disp = selfA._parameters["optdisp"],
1096 elif selfA._parameters["Minimizer"] == "NCG":
1097 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1100 fprime = GradientOfCostFunction,
1102 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1103 avextol = selfA._parameters["CostDecrementTolerance"],
1104 disp = selfA._parameters["optdisp"],
1107 elif selfA._parameters["Minimizer"] == "BFGS":
1108 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1111 fprime = GradientOfCostFunction,
1113 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1114 gtol = selfA._parameters["GradientNormTolerance"],
1115 disp = selfA._parameters["optdisp"],
1119 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1121 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1122 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    # Workaround for a TNC bug on the returned Minimum
    # -------------------------------------------------
1126 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1127 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1128 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1130 Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    # Retrieval of the analysis
    # -------------------------
1136 selfA.StoredVariables["Analysis"].store( Xa )
1138 if selfA._toStore("OMA") or \
1139 selfA._toStore("SigmaObs2") or \
1140 selfA._toStore("SimulationQuantiles") or \
1141 selfA._toStore("SimulatedObservationAtOptimum"):
1142 if selfA._toStore("SimulatedObservationAtCurrentState"):
1143 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1144 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1145 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
    # Computation of the analysis covariance
    # --------------------------------------
1151 if selfA._toStore("APosterioriCovariance") or \
1152 selfA._toStore("SimulationQuantiles") or \
1153 selfA._toStore("JacobianMatrixAtOptimum") or \
1154 selfA._toStore("KalmanGainAtOptimum"):
1155 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1156 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1157 if selfA._toStore("APosterioriCovariance") or \
1158 selfA._toStore("SimulationQuantiles") or \
1159 selfA._toStore("KalmanGainAtOptimum"):
1160 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1161 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1162 if selfA._toStore("APosterioriCovariance") or \
1163 selfA._toStore("SimulationQuantiles"):
1168 _ee = numpy.matrix(numpy.zeros(nb)).T
1170 _HtEE = numpy.dot(HtM,_ee)
1171 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1172 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1173 HessienneI = numpy.matrix( HessienneI )
1175 if min(A.shape) != max(A.shape):
1176 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1177 if (numpy.diag(A) < 0).any():
1178 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1181 L = numpy.linalg.cholesky( A )
1183 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1184 if selfA._toStore("APosterioriCovariance"):
1185 selfA.StoredVariables["APosterioriCovariance"].store( A )
1186 if selfA._toStore("JacobianMatrixAtOptimum"):
1187 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1188 if selfA._toStore("KalmanGainAtOptimum"):
1189 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1190 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1191 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    # Additional computations and/or storage
    # --------------------------------------
1195 if selfA._toStore("Innovation") or \
1196 selfA._toStore("SigmaObs2") or \
1197 selfA._toStore("MahalanobisConsistency") or \
1198 selfA._toStore("OMB"):
1200 if selfA._toStore("Innovation"):
1201 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1202 if selfA._toStore("BMA"):
1203 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1204 if selfA._toStore("OMA"):
1205 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1206 if selfA._toStore("OMB"):
1207 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1208 if selfA._toStore("SigmaObs2"):
1209 TraceR = R.trace(Y.size)
1210 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1211 if selfA._toStore("MahalanobisConsistency"):
1212 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1213 if selfA._toStore("SimulationQuantiles"):
1214 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1215 HXa = numpy.matrix(numpy.ravel( HXa )).T
1217 for i in range(nech):
1218 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1219 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1220 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1222 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1223 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1224 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1228 YfQ = numpy.hstack((YfQ,Yr))
1231 for quantile in selfA._parameters["Quantiles"]:
1232 if not (0. <= float(quantile) <= 1.): continue
1233 indice = int(nech * float(quantile) - 1./nech)
1234 if YQ is None: YQ = YfQ[:,indice]
1235 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1236 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1237 if selfA._toStore("SimulatedObservationAtBackground"):
1238 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1239 if selfA._toStore("SimulatedObservationAtOptimum"):
1240 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1244 # ==============================================================================
1245 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    # Non-linear operator for the outer loop
1254 Hm = HO["Direct"].appliedTo
    # Precomputation of the inverses of B and R
    # Starting point of the optimisation
1261 Xini = selfA._parameters["InitializationPoint"]
1263 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1264 Innovation = Y - HXb
1271 Xr = Xini.reshape((-1,1))
1272 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1276 Ht = HO["Tangent"].asMatrix(Xr)
1277 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        # Definition of the cost function
        # -------------------------------
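        # Incremental formulation: at each outer iteration, the cost is quadratic in
        # the increment dx around the current linearisation point,
        #   J(dx) = 1/2 dx^T B^{-1} dx + 1/2 (d - Ht dx)^T R^{-1} (d - Ht dx)
        # where d is the innovation and Ht the tangent linear of H computed above.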
1281 def CostFunction(dx):
1282 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1283 if selfA._parameters["StoreInternalVariables"] or \
1284 selfA._toStore("CurrentState") or \
1285 selfA._toStore("CurrentOptimum"):
1286 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1288 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1289 _dInnovation = Innovation - _HdX
1290 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1291 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1292 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1293 if selfA._toStore("InnovationAtCurrentState"):
1294 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1296 Jb = float( 0.5 * _dX.T * BI * _dX )
            Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J  = Jb + Jo
1300 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1301 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1302 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1303 selfA.StoredVariables["CostFunctionJ" ].store( J )
1304 if selfA._toStore("IndexOfOptimum") or \
1305 selfA._toStore("CurrentOptimum") or \
1306 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1307 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1308 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1309 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1310 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1311 if selfA._toStore("IndexOfOptimum"):
1312 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1313 if selfA._toStore("CurrentOptimum"):
1314 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1315 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1316 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1317 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1318 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1319 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1320 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1321 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1322 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1325 def GradientOfCostFunction(dx):
1326 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1328 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1329 _dInnovation = Innovation - _HdX
1331 GradJo = - Ht.T @ (RI * _dInnovation)
1332 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        # Minimisation of the functional
        # ------------------------------
1337 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1339 if selfA._parameters["Minimizer"] == "LBFGSB":
1340 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1341 if "0.19" <= scipy.version.version <= "1.1.0":
1342 import lbfgsbhlt as optimiseur
1344 import scipy.optimize as optimiseur
1345 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1346 func = CostFunction,
1347 x0 = numpy.zeros(Xini.size),
1348 fprime = GradientOfCostFunction,
1350 bounds = selfA._parameters["Bounds"],
1351 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1352 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1353 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1354 iprint = selfA._parameters["optiprint"],
1356 nfeval = Informations['funcalls']
1357 rc = Informations['warnflag']
1358 elif selfA._parameters["Minimizer"] == "TNC":
1359 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1360 func = CostFunction,
1361 x0 = numpy.zeros(Xini.size),
1362 fprime = GradientOfCostFunction,
1364 bounds = selfA._parameters["Bounds"],
1365 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1366 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1367 ftol = selfA._parameters["CostDecrementTolerance"],
1368 messages = selfA._parameters["optmessages"],
1370 elif selfA._parameters["Minimizer"] == "CG":
1371 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1373 x0 = numpy.zeros(Xini.size),
1374 fprime = GradientOfCostFunction,
1376 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1377 gtol = selfA._parameters["GradientNormTolerance"],
1378 disp = selfA._parameters["optdisp"],
1381 elif selfA._parameters["Minimizer"] == "NCG":
1382 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1384 x0 = numpy.zeros(Xini.size),
1385 fprime = GradientOfCostFunction,
1387 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1388 avextol = selfA._parameters["CostDecrementTolerance"],
1389 disp = selfA._parameters["optdisp"],
1392 elif selfA._parameters["Minimizer"] == "BFGS":
1393 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1395 x0 = numpy.zeros(Xini.size),
1396 fprime = GradientOfCostFunction,
1398 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1399 gtol = selfA._parameters["GradientNormTolerance"],
1400 disp = selfA._parameters["optdisp"],
1404 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1406 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1407 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1409 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1410 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1411 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1413 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1416 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1417 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
    # Retrieval of the analysis
    # -------------------------
1423 selfA.StoredVariables["Analysis"].store( Xa )
1425 if selfA._toStore("OMA") or \
1426 selfA._toStore("SigmaObs2") or \
1427 selfA._toStore("SimulationQuantiles") or \
1428 selfA._toStore("SimulatedObservationAtOptimum"):
1429 if selfA._toStore("SimulatedObservationAtCurrentState"):
1430 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1431 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1432 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
    # Computation of the analysis covariance
    # --------------------------------------
1438 if selfA._toStore("APosterioriCovariance") or \
1439 selfA._toStore("SimulationQuantiles") or \
1440 selfA._toStore("JacobianMatrixAtOptimum") or \
1441 selfA._toStore("KalmanGainAtOptimum"):
1442 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1443 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1444 if selfA._toStore("APosterioriCovariance") or \
1445 selfA._toStore("SimulationQuantiles") or \
1446 selfA._toStore("KalmanGainAtOptimum"):
1447 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1448 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1449 if selfA._toStore("APosterioriCovariance") or \
1450 selfA._toStore("SimulationQuantiles"):
1454 _ee = numpy.matrix(numpy.zeros(nb)).T
1456 _HtEE = numpy.dot(HtM,_ee)
1457 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1458 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1459 HessienneI = numpy.matrix( HessienneI )
1461 if min(A.shape) != max(A.shape):
1462 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1463 if (numpy.diag(A) < 0).any():
1464 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1467 L = numpy.linalg.cholesky( A )
1469 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1470 if selfA._toStore("APosterioriCovariance"):
1471 selfA.StoredVariables["APosterioriCovariance"].store( A )
1472 if selfA._toStore("JacobianMatrixAtOptimum"):
1473 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1474 if selfA._toStore("KalmanGainAtOptimum"):
1475 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1476 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1477 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    # Additional computations and/or storage
    # --------------------------------------
1481 if selfA._toStore("Innovation") or \
1482 selfA._toStore("SigmaObs2") or \
1483 selfA._toStore("MahalanobisConsistency") or \
1484 selfA._toStore("OMB"):
1486 if selfA._toStore("Innovation"):
1487 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1488 if selfA._toStore("BMA"):
1489 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1490 if selfA._toStore("OMA"):
1491 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1492 if selfA._toStore("OMB"):
1493 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1494 if selfA._toStore("SigmaObs2"):
1495 TraceR = R.trace(Y.size)
1496 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1497 if selfA._toStore("MahalanobisConsistency"):
1498 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1499 if selfA._toStore("SimulationQuantiles"):
1500 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1501 HXa = numpy.matrix(numpy.ravel( HXa )).T
1503 for i in range(nech):
1504 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1505 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1506 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1508 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1509 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1510 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1514 YfQ = numpy.hstack((YfQ,Yr))
1517 for quantile in selfA._parameters["Quantiles"]:
1518 if not (0. <= float(quantile) <= 1.): continue
1519 indice = int(nech * float(quantile) - 1./nech)
1520 if YQ is None: YQ = YfQ[:,indice]
1521 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1522 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1523 if selfA._toStore("SimulatedObservationAtBackground"):
1524 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1525 if selfA._toStore("SimulatedObservationAtOptimum"):
1526 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
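
# Illustrative sketch (not part of the ADAO API): how per-component empirical
# quantiles can be read from a column-wise ensemble of simulated observations,
# in the spirit of the "SimulationQuantiles" block above. All names are local
# to this example, and the explicit per-row sort is an assumption of the sketch.
def _example_empirical_quantiles():
    nech = 100
    YfQ  = numpy.random.randn(3, nech)   # 3 observation components, nech samples
    YfQ.sort(axis=1)                     # sort each component across the samples
    YQ   = None
    for quantile in (0.25, 0.5, 0.75):
        indice = int(nech * float(quantile) - 1./nech)
        if YQ is None: YQ = YfQ[:,indice:indice+1]
        else:          YQ = numpy.hstack((YQ, YfQ[:,indice:indice+1]))
    return YQ                            # one column per requested quantile
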
1530 # ==============================================================================
1531 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1540 Hm = HO["Direct"].appliedTo
1542    # Possible use of a precomputed H(Xb) vector
1543 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1544 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1547 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1548 if Y.size != HXb.size:
1549 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
1550 if max(Y.shape) != max(HXb.shape):
1551 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
1553 if selfA._toStore("JacobianMatrixAtBackground"):
1554 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1555 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1556 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1558 Ht = HO["Tangent"].asMatrix(Xb)
1560 HBHTpR = R + Ht * BHT
1561 Innovation = Y - HXb
1563    # Starting point of the optimization
1564 Xini = numpy.zeros(Xb.shape)
1566    # Definition of the cost function
1567 # ------------------------------
1568 def CostFunction(w):
1569 _W = numpy.asmatrix(numpy.ravel( w )).T
1570 if selfA._parameters["StoreInternalVariables"] or \
1571 selfA._toStore("CurrentState") or \
1572 selfA._toStore("CurrentOptimum"):
1573 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1574 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1575 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1576 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1577 if selfA._toStore("InnovationAtCurrentState"):
1578 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1580 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1581 Jo = float( - _W.T * Innovation )
1584 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1585 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1586 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1587 selfA.StoredVariables["CostFunctionJ" ].store( J )
1588 if selfA._toStore("IndexOfOptimum") or \
1589 selfA._toStore("CurrentOptimum") or \
1590 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1591 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1592 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1593 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1594 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1595 if selfA._toStore("IndexOfOptimum"):
1596 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1597 if selfA._toStore("CurrentOptimum"):
1598 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1599 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1600 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1601 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1602 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1603 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1604 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1605 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1606 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1609 def GradientOfCostFunction(w):
1610 _W = numpy.asmatrix(numpy.ravel( w )).T
1611 GradJb = HBHTpR * _W
1612 GradJo = - Innovation
1613 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
1616    # Minimization of the functional
1617 # --------------------------------
1618 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1620 if selfA._parameters["Minimizer"] == "LBFGSB":
1621 if "0.19" <= scipy.version.version <= "1.1.0":
1622 import lbfgsbhlt as optimiseur
1624 import scipy.optimize as optimiseur
1625 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1626 func = CostFunction,
1628 fprime = GradientOfCostFunction,
1630 bounds = selfA._parameters["Bounds"],
1631 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1632 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1633 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1634 iprint = selfA._parameters["optiprint"],
1636 nfeval = Informations['funcalls']
1637 rc = Informations['warnflag']
1638 elif selfA._parameters["Minimizer"] == "TNC":
1639 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1640 func = CostFunction,
1642 fprime = GradientOfCostFunction,
1644 bounds = selfA._parameters["Bounds"],
1645 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1646 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1647 ftol = selfA._parameters["CostDecrementTolerance"],
1648 messages = selfA._parameters["optmessages"],
1650 elif selfA._parameters["Minimizer"] == "CG":
1651 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1654 fprime = GradientOfCostFunction,
1656 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1657 gtol = selfA._parameters["GradientNormTolerance"],
1658 disp = selfA._parameters["optdisp"],
1661 elif selfA._parameters["Minimizer"] == "NCG":
1662 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1665 fprime = GradientOfCostFunction,
1667 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1668 avextol = selfA._parameters["CostDecrementTolerance"],
1669 disp = selfA._parameters["optdisp"],
1672 elif selfA._parameters["Minimizer"] == "BFGS":
1673 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1676 fprime = GradientOfCostFunction,
1678 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1679 gtol = selfA._parameters["GradientNormTolerance"],
1680 disp = selfA._parameters["optdisp"],
1684 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1686 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1687 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1689    # Workaround for a TNC bug affecting the returned Minimum
1690 # ----------------------------------------------------------------
1691 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1692 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1693 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1695 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
1697    # Obtaining the analysis
1698 # ----------------------
1701 selfA.StoredVariables["Analysis"].store( Xa )
1703 if selfA._toStore("OMA") or \
1704 selfA._toStore("SigmaObs2") or \
1705 selfA._toStore("SimulationQuantiles") or \
1706 selfA._toStore("SimulatedObservationAtOptimum"):
1707 if selfA._toStore("SimulatedObservationAtCurrentState"):
1708 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1709 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1710 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1714    # Computation of the analysis covariance
1715 # ---------------------------------
1716 if selfA._toStore("APosterioriCovariance") or \
1717 selfA._toStore("SimulationQuantiles") or \
1718 selfA._toStore("JacobianMatrixAtOptimum") or \
1719 selfA._toStore("KalmanGainAtOptimum"):
1720 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1721 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1722 if selfA._toStore("APosterioriCovariance") or \
1723 selfA._toStore("SimulationQuantiles") or \
1724 selfA._toStore("KalmanGainAtOptimum"):
1725 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1726 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1727 if selfA._toStore("APosterioriCovariance") or \
1728 selfA._toStore("SimulationQuantiles"):
1734 _ee = numpy.matrix(numpy.zeros(nb)).T
1736 _HtEE = numpy.dot(HtM,_ee)
1737 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1738 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1739 HessienneI = numpy.matrix( HessienneI )
1741 if min(A.shape) != max(A.shape):
1742        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1743 if (numpy.diag(A) < 0).any():
1744 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1745    if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1747 L = numpy.linalg.cholesky( A )
1749 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1750 if selfA._toStore("APosterioriCovariance"):
1751 selfA.StoredVariables["APosterioriCovariance"].store( A )
1752 if selfA._toStore("JacobianMatrixAtOptimum"):
1753 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1754 if selfA._toStore("KalmanGainAtOptimum"):
1755 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1756 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1757 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1759    # Additional calculations and/or storage
1760 # ---------------------------------------
1761 if selfA._toStore("Innovation") or \
1762 selfA._toStore("SigmaObs2") or \
1763 selfA._toStore("MahalanobisConsistency") or \
1764 selfA._toStore("OMB"):
1766 if selfA._toStore("Innovation"):
1767 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1768 if selfA._toStore("BMA"):
1769 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1770 if selfA._toStore("OMA"):
1771 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1772 if selfA._toStore("OMB"):
1773 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1774 if selfA._toStore("SigmaObs2"):
1775 TraceR = R.trace(Y.size)
1776 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1777 if selfA._toStore("MahalanobisConsistency"):
1778 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1779 if selfA._toStore("SimulationQuantiles"):
1780 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1781 HXa = numpy.matrix(numpy.ravel( HXa )).T
1783 for i in range(nech):
1784 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1785 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1786 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1788 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1789 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1790 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1794 YfQ = numpy.hstack((YfQ,Yr))
1797 for quantile in selfA._parameters["Quantiles"]:
1798 if not (0. <= float(quantile) <= 1.): continue
1799 indice = int(nech * float(quantile) - 1./nech)
1800 if YQ is None: YQ = YfQ[:,indice]
1801 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1802 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1803 if selfA._toStore("SimulatedObservationAtBackground"):
1804 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1805 if selfA._toStore("SimulatedObservationAtOptimum"):
1806 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
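
# Illustrative sketch (not part of the ADAO API): for a linear observation
# operator, the PSAS analysis Xb + B*Ht*w, with w minimizing the dual cost
# 0.5*w.T*(H*B*Ht+R)*w - w.T*d used in psas3dvar above, coincides with the
# classical BLUE/3DVAR analysis Xb + K*d. Small matrices chosen arbitrarily.
def _example_psas_equivalence():
    B  = numpy.array([[2. , 0.3],[0.3, 1. ]])
    R  = numpy.array([[0.5]])
    H  = numpy.array([[1. , 0. ]])
    Xb = numpy.array([[1.],[2.]])
    d  = numpy.array([[0.7]])                    # innovation Y - H(Xb)
    w  = numpy.linalg.solve(H @ B @ H.T + R, d)  # minimizer of the dual cost
    Xa_psas = Xb + B @ H.T @ w
    K       = B @ H.T @ numpy.linalg.inv(H @ B @ H.T + R)
    Xa_blue = Xb + K @ d
    return numpy.allclose(Xa_psas, Xa_blue)      # True
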
1810 # ==============================================================================
1811 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1820 Hm = HO["Direct"].appliedControledFormTo
1821 Mm = EM["Direct"].appliedControledFormTo
1823 if CM is not None and "Tangent" in CM and U is not None:
1824 Cm = CM["Tangent"].asMatrix(Xb)
1830 if hasattr(U,"store") and 1<=_step<len(U) :
1831 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1832 elif hasattr(U,"store") and len(U)==1:
1833 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1835 _Un = numpy.asmatrix(numpy.ravel( U )).T
1840        if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
1841 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1847    # Remark: observations are taken into account starting from time step
1848    # number 1, and are used in Yo as arranged along these indices. Hence
1849    # step 0 is not used, since the first stage starts with the
1850    # observation of step 1.
1852    # Number of steps identical to the number of observation steps
1853 if hasattr(Y,"stepnumber"):
1854 duration = Y.stepnumber()
1858    # Precomputation of the inversions of B and R
1862    # Starting point of the optimization
1863 Xini = selfA._parameters["InitializationPoint"]
1865    # Definition of the cost function
1866 # ------------------------------
1867    selfA.DirectCalculation = [None,] # Step 0 is not observed
1868    selfA.DirectInnovation  = [None,] # Step 0 is not observed
1869 def CostFunction(x):
1870 _X = numpy.asmatrix(numpy.ravel( x )).T
1871 if selfA._parameters["StoreInternalVariables"] or \
1872 selfA._toStore("CurrentState") or \
1873 selfA._toStore("CurrentOptimum"):
1874 selfA.StoredVariables["CurrentState"].store( _X )
1875 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1876 selfA.DirectCalculation = [None,]
1877 selfA.DirectInnovation = [None,]
1880 for step in range(0,duration-1):
1881 if hasattr(Y,"store"):
1882 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1884 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1888 if selfA._parameters["EstimationOf"] == "State":
1889 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1890 elif selfA._parameters["EstimationOf"] == "Parameters":
1893 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1894 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1895 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1897            # Difference-to-observations step
1898 if selfA._parameters["EstimationOf"] == "State":
1899 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1900 elif selfA._parameters["EstimationOf"] == "Parameters":
1901 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1903            # Storage of the state
1904 selfA.DirectCalculation.append( _Xn )
1905 selfA.DirectInnovation.append( _YmHMX )
1907            # Addition to the observation part of the functional
1908 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
1911 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1912 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1913 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1914 selfA.StoredVariables["CostFunctionJ" ].store( J )
1915 if selfA._toStore("IndexOfOptimum") or \
1916 selfA._toStore("CurrentOptimum") or \
1917 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1918 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1919 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1920 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1921 if selfA._toStore("IndexOfOptimum"):
1922 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1923 if selfA._toStore("CurrentOptimum"):
1924 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1925 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1926 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1927 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1928 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1929 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1930 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1933 def GradientOfCostFunction(x):
1934 _X = numpy.asmatrix(numpy.ravel( x )).T
1935 GradJb = BI * (_X - Xb)
1937 for step in range(duration-1,0,-1):
1938            # Retrieve the last stored evolution state
1939            _Xn = selfA.DirectCalculation.pop()
1940            # Retrieve the last stored innovation
1941            _YmHMX = selfA.DirectInnovation.pop()
1942            # Computation of the adjoints
1943            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1944            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1945            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1946            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1947            # Computation of the gradient by the adjoint state
1948            GradJo = GradJo + Ha * RI * _YmHMX # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1949            GradJo = Ma * GradJo # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1950 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
1953    # Minimization of the functional
1954 # --------------------------------
1955 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1957 if selfA._parameters["Minimizer"] == "LBFGSB":
1958 if "0.19" <= scipy.version.version <= "1.1.0":
1959 import lbfgsbhlt as optimiseur
1961 import scipy.optimize as optimiseur
1962 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1963 func = CostFunction,
1965 fprime = GradientOfCostFunction,
1967 bounds = selfA._parameters["Bounds"],
1968 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1969 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1970 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1971 iprint = selfA._parameters["optiprint"],
1973 nfeval = Informations['funcalls']
1974 rc = Informations['warnflag']
1975 elif selfA._parameters["Minimizer"] == "TNC":
1976 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1977 func = CostFunction,
1979 fprime = GradientOfCostFunction,
1981 bounds = selfA._parameters["Bounds"],
1982 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1983 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1984 ftol = selfA._parameters["CostDecrementTolerance"],
1985 messages = selfA._parameters["optmessages"],
1987 elif selfA._parameters["Minimizer"] == "CG":
1988 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1991 fprime = GradientOfCostFunction,
1993 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1994 gtol = selfA._parameters["GradientNormTolerance"],
1995 disp = selfA._parameters["optdisp"],
1998 elif selfA._parameters["Minimizer"] == "NCG":
1999 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2002 fprime = GradientOfCostFunction,
2004 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2005 avextol = selfA._parameters["CostDecrementTolerance"],
2006 disp = selfA._parameters["optdisp"],
2009 elif selfA._parameters["Minimizer"] == "BFGS":
2010 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2013 fprime = GradientOfCostFunction,
2015 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2016 gtol = selfA._parameters["GradientNormTolerance"],
2017 disp = selfA._parameters["optdisp"],
2021 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2023 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2024 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2026    # Workaround for a TNC bug affecting the returned Minimum
2027 # ----------------------------------------------------------------
2028 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2029 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2031    # Obtaining the analysis
2032 # ----------------------
2033 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2035 selfA.StoredVariables["Analysis"].store( Xa )
2037    # Additional calculations and/or storage
2038 # ---------------------------------------
2039 if selfA._toStore("BMA"):
2040 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
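
# Illustrative sketch (not part of the ADAO API): the backward adjoint
# recursion used in GradientOfCostFunction of std4dvar, checked against a
# finite-difference gradient, for small arbitrary linear M and H operators.
def _example_adjoint_gradient_check():
    M  = numpy.array([[0.9, 0.1],[0. , 0.8]])
    H  = numpy.array([[1., 0.]])
    RI = numpy.array([[2.]])
    Y  = [numpy.array([[0.4]]), numpy.array([[0.3]])]
    def Jo(x):
        xk, value = x.copy(), 0.
        for Yk in Y:
            xk = M @ xk
            dk = Yk - H @ xk
            value += 0.5 * float(dk.T @ RI @ dk)
        return value
    x = numpy.array([[1.],[-0.5]])
    # forward sweep storing the innovations, then backward adjoint accumulation
    xk, innovations = x.copy(), []
    for Yk in Y:
        xk = M @ xk
        innovations.append(Yk - H @ xk)
    GradJo = numpy.zeros((2,1))
    for dk in reversed(innovations):
        GradJo = M.T @ (GradJo + H.T @ RI @ dk)
    # the gradient of Jo is -GradJo, as in "GradJ = GradJb - GradJo" above
    eps = 1.e-6
    fd  = numpy.array([[(Jo(x + eps*numpy.eye(2)[:,[i]]) - Jo(x - eps*numpy.eye(2)[:,[i]])) / (2.*eps)] for i in range(2)])
    return numpy.allclose(-GradJo, fd, atol=1.e-5)
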
2044 # ==============================================================================
2045 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2049 if selfA._parameters["EstimationOf"] == "Parameters":
2050 selfA._parameters["StoreInternalVariables"] = True
2054 H = HO["Direct"].appliedControledFormTo
2056 if selfA._parameters["EstimationOf"] == "State":
2057 M = EM["Direct"].appliedControledFormTo
2059 if CM is not None and "Tangent" in CM and U is not None:
2060 Cm = CM["Tangent"].asMatrix(Xb)
2064    # Number of steps identical to the number of observation steps
2065 # -------------------------------------------------------
2066 if hasattr(Y,"stepnumber"):
2067 duration = Y.stepnumber()
2068 __p = numpy.cumprod(Y.shape())[-1]
2071 __p = numpy.array(Y).size
2073    # Precomputation of the inversions of B and R
2074 # ----------------------------------
2075 if selfA._parameters["StoreInternalVariables"] \
2076 or selfA._toStore("CostFunctionJ") \
2077 or selfA._toStore("CostFunctionJb") \
2078 or selfA._toStore("CostFunctionJo") \
2079 or selfA._toStore("CurrentOptimum") \
2080 or selfA._toStore("APosterioriCovariance"):
2087 __m = selfA._parameters["NumberOfMembers"]
2088 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2090 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2092 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2094 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2096 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2097 selfA.StoredVariables["Analysis"].store( Xb )
2098 if selfA._toStore("APosterioriCovariance"):
2099 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2102 previousJMinimum = numpy.finfo(float).max
2104 for step in range(duration-1):
2105 if hasattr(Y,"store"):
2106 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2108 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2111 if hasattr(U,"store") and len(U)>1:
2112 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2113 elif hasattr(U,"store") and len(U)==1:
2114 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2116 Un = numpy.asmatrix(numpy.ravel( U )).T
2120 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2121 Xn = CovarianceInflation( Xn,
2122 selfA._parameters["InflationType"],
2123 selfA._parameters["InflationFactor"],
2126 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2127 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2129 returnSerieAsArrayMatrix = True )
2130 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2131 Xn_predicted = EMX + qi
2132 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2134 returnSerieAsArrayMatrix = True )
2135            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2136 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2137 Xn_predicted = Xn_predicted + Cm * Un
2138 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2139            # --- > By principle, M = Id, Q = 0
2141 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2143 returnSerieAsArrayMatrix = True )
2145 # Mean of forecast and observation of forecast
2146 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2147 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2149 #--------------------------
2150 if VariantM == "KalmanFilterFormula05":
2151 PfHT, HPfHT = 0., 0.
2152 for i in range(__m):
2153 Exfi = Xn_predicted[:,i].reshape((__n,-1)) - Xfm
2154 Eyfi = HX_predicted[:,i].reshape((__p,-1)) - Hfm
2155 PfHT += Exfi * Eyfi.T
2156 HPfHT += Eyfi * Eyfi.T
2157 PfHT = (1./(__m-1)) * PfHT
2158 HPfHT = (1./(__m-1)) * HPfHT
2159 Kn = PfHT * ( R + HPfHT ).I
2162 for i in range(__m):
2163 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2164 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2165 #--------------------------
2166 elif VariantM == "KalmanFilterFormula16":
2167 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2168 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2170 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2171 EaY = (HX_predicted - Hfm - EpY + EpYm) / numpy.sqrt(__m-1)
2173 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2175 for i in range(__m):
2176 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2177 #--------------------------
2179 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2181 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2182 Xn = CovarianceInflation( Xn,
2183 selfA._parameters["InflationType"],
2184 selfA._parameters["InflationFactor"],
2187 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2188 #--------------------------
2190 if selfA._parameters["StoreInternalVariables"] \
2191 or selfA._toStore("CostFunctionJ") \
2192 or selfA._toStore("CostFunctionJb") \
2193 or selfA._toStore("CostFunctionJo") \
2194 or selfA._toStore("APosterioriCovariance") \
2195 or selfA._toStore("InnovationAtCurrentAnalysis") \
2196 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2197 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2198 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2199 _Innovation = Ynpu - _HXa
2201 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2202        # ---> with analysis
2203 selfA.StoredVariables["Analysis"].store( Xa )
2204 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2205 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2206 if selfA._toStore("InnovationAtCurrentAnalysis"):
2207 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2208        # ---> with current state
2209 if selfA._parameters["StoreInternalVariables"] \
2210 or selfA._toStore("CurrentState"):
2211 selfA.StoredVariables["CurrentState"].store( Xn )
2212 if selfA._toStore("ForecastState"):
2213 selfA.StoredVariables["ForecastState"].store( EMX )
2214 if selfA._toStore("BMA"):
2215 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2216 if selfA._toStore("InnovationAtCurrentState"):
2217 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2218 if selfA._toStore("SimulatedObservationAtCurrentState") \
2219 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2220 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2222 if selfA._parameters["StoreInternalVariables"] \
2223 or selfA._toStore("CostFunctionJ") \
2224 or selfA._toStore("CostFunctionJb") \
2225 or selfA._toStore("CostFunctionJo") \
2226 or selfA._toStore("CurrentOptimum") \
2227 or selfA._toStore("APosterioriCovariance"):
2228 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2229 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2231 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2232 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2233 selfA.StoredVariables["CostFunctionJ" ].store( J )
2235 if selfA._toStore("IndexOfOptimum") \
2236 or selfA._toStore("CurrentOptimum") \
2237 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2238 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2239 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2240 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2241 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2242 if selfA._toStore("IndexOfOptimum"):
2243 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2244 if selfA._toStore("CurrentOptimum"):
2245 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2246 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2247 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2248 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2249 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2250 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2251 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2252 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2253 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2254 if selfA._toStore("APosterioriCovariance"):
2255 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2257 Pn = 0.5 * (Pn + Pn.T)
2258 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2259 if selfA._parameters["EstimationOf"] == "Parameters" \
2260 and J < previousJMinimum:
2261 previousJMinimum = J
2263 if selfA._toStore("APosterioriCovariance"):
2264 covarianceXaMin = Pn
2266    # Additional final storage of the optimum in parameter estimation
2267 # ----------------------------------------------------------------------
2268 if selfA._parameters["EstimationOf"] == "Parameters":
2269 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2270 selfA.StoredVariables["Analysis"].store( XaMin )
2271 if selfA._toStore("APosterioriCovariance"):
2272 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2273 if selfA._toStore("BMA"):
2274 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
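
# Illustrative sketch (not part of the ADAO API): the perturbed-observation
# update of the stochastic EnKF, as in the "KalmanFilterFormula05" branch of
# senkf above, on an arbitrary small ensemble with a linear observation operator.
def _example_stochastic_enkf_update():
    numpy.random.seed(0)
    __n, __p, __m = 2, 1, 50
    Xf  = numpy.random.randn(__n, __m)               # forecast ensemble
    H   = numpy.array([[1., 0.]])
    R   = numpy.array([[0.5]])
    Y   = numpy.array([0.2])
    HXf = H @ Xf
    Xfm = Xf.mean(axis=1, keepdims=True)
    Hfm = HXf.mean(axis=1, keepdims=True)
    PfHT  = (Xf  - Xfm) @ (HXf - Hfm).T / (__m - 1)  # Pf Ht estimated from anomalies
    HPfHT = (HXf - Hfm) @ (HXf - Hfm).T / (__m - 1)  # H Pf Ht estimated from anomalies
    K  = PfHT @ numpy.linalg.inv(R + HPfHT)
    Xa = numpy.empty_like(Xf)
    for i in range(__m):                             # one perturbed observation per member
        ri = numpy.random.multivariate_normal(numpy.zeros(__p), R)
        Xa[:,i] = Xf[:,i] + K @ (Y + ri - HXf[:,i])
    return Xa.mean(axis=1)                           # analysis ensemble mean
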
2278 # ==============================================================================
2279 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2281 Ensemble-Transform EnKF
2283 if selfA._parameters["EstimationOf"] == "Parameters":
2284 selfA._parameters["StoreInternalVariables"] = True
2288 H = HO["Direct"].appliedControledFormTo
2290 if selfA._parameters["EstimationOf"] == "State":
2291 M = EM["Direct"].appliedControledFormTo
2293 if CM is not None and "Tangent" in CM and U is not None:
2294 Cm = CM["Tangent"].asMatrix(Xb)
2298 # Nombre de pas identique au nombre de pas d'observations
2299 # -------------------------------------------------------
2300 if hasattr(Y,"stepnumber"):
2301 duration = Y.stepnumber()
2302 __p = numpy.cumprod(Y.shape())[-1]
2305 __p = numpy.array(Y).size
2307    # Precomputation of the inversions of B and R
2308 # ----------------------------------
2309 if selfA._parameters["StoreInternalVariables"] \
2310 or selfA._toStore("CostFunctionJ") \
2311 or selfA._toStore("CostFunctionJb") \
2312 or selfA._toStore("CostFunctionJo") \
2313 or selfA._toStore("CurrentOptimum") \
2314 or selfA._toStore("APosterioriCovariance"):
2317 elif VariantM != "KalmanFilterFormula":
2319 if VariantM == "KalmanFilterFormula":
2320 RIdemi = R.choleskyI()
2325 __m = selfA._parameters["NumberOfMembers"]
2326 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2328 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2330 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2332 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2334 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2335 selfA.StoredVariables["Analysis"].store( Xb )
2336 if selfA._toStore("APosterioriCovariance"):
2337 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2340 previousJMinimum = numpy.finfo(float).max
2342 for step in range(duration-1):
2343 if hasattr(Y,"store"):
2344 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2346 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2349 if hasattr(U,"store") and len(U)>1:
2350 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2351 elif hasattr(U,"store") and len(U)==1:
2352 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2354 Un = numpy.asmatrix(numpy.ravel( U )).T
2358 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2359 Xn = CovarianceInflation( Xn,
2360 selfA._parameters["InflationType"],
2361 selfA._parameters["InflationFactor"],
2364 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2365 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2367 returnSerieAsArrayMatrix = True )
2368 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2369 Xn_predicted = EMX + qi
2370 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2372 returnSerieAsArrayMatrix = True )
2373            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2374 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2375 Xn_predicted = Xn_predicted + Cm * Un
2376 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2377            # --- > By principle, M = Id, Q = 0
2379 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2381 returnSerieAsArrayMatrix = True )
2383 # Mean of forecast and observation of forecast
2384 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2385 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2388 EaX = EnsembleOfAnomalies( Xn_predicted )
2389 EaHX = numpy.array(HX_predicted - Hfm)
2391 #--------------------------
2392 if VariantM == "KalmanFilterFormula":
2393 mS = RIdemi * EaHX / numpy.sqrt(__m-1)
2394 delta = RIdemi * ( Ynpu - Hfm )
2395 mT = numpy.linalg.inv( numpy.eye(__m) + mS.T @ mS )
2396 vw = mT @ mS.transpose() @ delta
2398 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2401 EaX = EaX / numpy.sqrt(__m-1)
2402 Xn = Xfm + EaX @ ( vw.reshape((__m,-1)) + numpy.sqrt(__m-1) * Tdemi @ mU )
2403 #--------------------------
2404 elif VariantM == "Variational":
2405            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2406 def CostFunction(w):
2407 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2408 _Jo = 0.5 * _A.T @ (RI * _A)
2409 _Jb = 0.5 * (__m-1) * w.T @ w
2412 def GradientOfCostFunction(w):
2413 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2414                _GradJo = - EaHX.T @ (RI * _A)
2415                _GradJb = (__m-1) * w.reshape((__m,1))
2416                _GradJ = _GradJo + _GradJb
2417 return numpy.ravel(_GradJ)
2418 vw = scipy.optimize.fmin_cg(
2420 x0 = numpy.zeros(__m),
2421 fprime = GradientOfCostFunction,
2426 Hto = EaHX.T @ (RI * EaHX)
2427 Htb = (__m-1) * numpy.eye(__m)
2430 Pta = numpy.linalg.inv( Hta )
2431            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2433 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2434 #--------------------------
2435        elif VariantM == "FiniteSize11": # Boc2011 gauge
2436            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2437 def CostFunction(w):
2438 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2439 _Jo = 0.5 * _A.T @ (RI * _A)
2440 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2443 def GradientOfCostFunction(w):
2444 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2445                _GradJo = - EaHX.T @ (RI * _A)
2446                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2447                _GradJ = _GradJo + _GradJb
2448 return numpy.ravel(_GradJ)
2449 vw = scipy.optimize.fmin_cg(
2451 x0 = numpy.zeros(__m),
2452 fprime = GradientOfCostFunction,
2457 Hto = EaHX.T @ (RI * EaHX)
2459 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2460 / (1 + 1/__m + vw.T @ vw)**2
2463 Pta = numpy.linalg.inv( Hta )
2464            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2466 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2467 #--------------------------
2468        elif VariantM == "FiniteSize15": # Boc2015 gauge
2469            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2470 def CostFunction(w):
2471 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2472 _Jo = 0.5 * _A.T * RI * _A
2473 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2476 def GradientOfCostFunction(w):
2477 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2478                _GradJo = - EaHX.T @ (RI * _A)
2479                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2480                _GradJ = _GradJo + _GradJb
2481 return numpy.ravel(_GradJ)
2482 vw = scipy.optimize.fmin_cg(
2484 x0 = numpy.zeros(__m),
2485 fprime = GradientOfCostFunction,
2490 Hto = EaHX.T @ (RI * EaHX)
2492 ( (1 + 1/__m + vw.T @ vw) * numpy.eye(__m) - 2 * vw @ vw.T ) \
2493 / (1 + 1/__m + vw.T @ vw)**2
2496 Pta = numpy.linalg.inv( Hta )
2497            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2499 Xn = Xfm + EaX @ (vw.reshape((__m,-1)) + EWa)
2500 #--------------------------
2501        elif VariantM == "FiniteSize16": # Boc2016 gauge
2502            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2503 def CostFunction(w):
2504 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2505 _Jo = 0.5 * _A.T @ (RI * _A)
2506 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2509 def GradientOfCostFunction(w):
2510 _A = Ynpu - HXfm.reshape((__p,-1)) - (EaHX @ w).reshape((__p,-1))
2511                _GradJo = - EaHX.T @ (RI * _A)
2512                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2513                _GradJ = _GradJo + _GradJb
2514 return numpy.ravel(_GradJ)
2515 vw = scipy.optimize.fmin_cg(
2517 x0 = numpy.zeros(__m),
2518 fprime = GradientOfCostFunction,
2523 Hto = EaHX.T @ (RI * EaHX)
2524 Htb = ((__m+1) / (__m-1)) * \
2525 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.eye(__m) - 2 * vw @ vw.T / (__m-1) ) \
2526 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2529 Pta = numpy.linalg.inv( Hta )
2530            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
2532 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2533 #--------------------------
2535 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2537 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2538 Xn = CovarianceInflation( Xn,
2539 selfA._parameters["InflationType"],
2540 selfA._parameters["InflationFactor"],
2543 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2544 #--------------------------
2546 if selfA._parameters["StoreInternalVariables"] \
2547 or selfA._toStore("CostFunctionJ") \
2548 or selfA._toStore("CostFunctionJb") \
2549 or selfA._toStore("CostFunctionJo") \
2550 or selfA._toStore("APosterioriCovariance") \
2551 or selfA._toStore("InnovationAtCurrentAnalysis") \
2552 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2553 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2554 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2555 _Innovation = Ynpu - _HXa
2557 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2558        # ---> with analysis
2559 selfA.StoredVariables["Analysis"].store( Xa )
2560 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2561 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2562 if selfA._toStore("InnovationAtCurrentAnalysis"):
2563 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2564        # ---> with current state
2565 if selfA._parameters["StoreInternalVariables"] \
2566 or selfA._toStore("CurrentState"):
2567 selfA.StoredVariables["CurrentState"].store( Xn )
2568 if selfA._toStore("ForecastState"):
2569 selfA.StoredVariables["ForecastState"].store( EMX )
2570 if selfA._toStore("BMA"):
2571 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2572 if selfA._toStore("InnovationAtCurrentState"):
2573 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu.reshape((__p,1)) )
2574 if selfA._toStore("SimulatedObservationAtCurrentState") \
2575 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2576 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2578 if selfA._parameters["StoreInternalVariables"] \
2579 or selfA._toStore("CostFunctionJ") \
2580 or selfA._toStore("CostFunctionJb") \
2581 or selfA._toStore("CostFunctionJo") \
2582 or selfA._toStore("CurrentOptimum") \
2583 or selfA._toStore("APosterioriCovariance"):
2584 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2585 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2587 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2588 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2589 selfA.StoredVariables["CostFunctionJ" ].store( J )
2591 if selfA._toStore("IndexOfOptimum") \
2592 or selfA._toStore("CurrentOptimum") \
2593 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2594 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2595 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2596 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2597 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2598 if selfA._toStore("IndexOfOptimum"):
2599 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2600 if selfA._toStore("CurrentOptimum"):
2601 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2602 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2603 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2604 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2605 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2606 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2607 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2608 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2609 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2610 if selfA._toStore("APosterioriCovariance"):
2611 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2613 Pn = 0.5 * (Pn + Pn.T)
2614 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2615 if selfA._parameters["EstimationOf"] == "Parameters" \
2616 and J < previousJMinimum:
2617 previousJMinimum = J
2619 if selfA._toStore("APosterioriCovariance"):
2620 covarianceXaMin = Pn
2622    # Additional final storage of the optimum in parameter estimation
2623 # ----------------------------------------------------------------------
2624 if selfA._parameters["EstimationOf"] == "Parameters":
2625 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2626 selfA.StoredVariables["Analysis"].store( XaMin )
2627 if selfA._toStore("APosterioriCovariance"):
2628 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2629 if selfA._toStore("BMA"):
2630 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
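
# Illustrative sketch (not part of the ADAO API): the deterministic transform
# of the "KalmanFilterFormula" branch of etkf above, written for a diagonal R
# on an arbitrary small ensemble (the random matrix mU is taken as the identity).
def _example_etkf_transform():
    import scipy.linalg
    numpy.random.seed(1)
    __n, __p, __m = 2, 2, 10
    Xf  = numpy.random.randn(__n, __m)
    H   = numpy.eye(__p, __n)
    R   = 0.25 * numpy.eye(__p)
    Y   = numpy.array([[0.1],[-0.2]])
    RIdemi = numpy.diag(1./numpy.sqrt(numpy.diag(R)))    # R^(-1/2) for a diagonal R
    Xfm   = Xf.mean(axis=1, keepdims=True)
    HXf   = H @ Xf
    Hfm   = HXf.mean(axis=1, keepdims=True)
    EaX   = (Xf  - Xfm) / numpy.sqrt(__m - 1)            # normalized state anomalies
    mS    = RIdemi @ (HXf - Hfm) / numpy.sqrt(__m - 1)   # normalized observed anomalies
    delta = RIdemi @ (Y - Hfm)
    mT    = numpy.linalg.inv(numpy.eye(__m) + mS.T @ mS)
    vw    = mT @ mS.T @ delta
    Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
    Xa    = Xfm + EaX @ (vw + numpy.sqrt(__m - 1) * Tdemi)
    return Xa.mean(axis=1)                               # analysis ensemble mean
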
2634 # ==============================================================================
2635 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2636 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2638 Maximum Likelihood Ensemble Filter
2640 if selfA._parameters["EstimationOf"] == "Parameters":
2641 selfA._parameters["StoreInternalVariables"] = True
2645 H = HO["Direct"].appliedControledFormTo
2647 if selfA._parameters["EstimationOf"] == "State":
2648 M = EM["Direct"].appliedControledFormTo
2650 if CM is not None and "Tangent" in CM and U is not None:
2651 Cm = CM["Tangent"].asMatrix(Xb)
2655    # Number of steps identical to the number of observation steps
2656 # -------------------------------------------------------
2657 if hasattr(Y,"stepnumber"):
2658 duration = Y.stepnumber()
2659 __p = numpy.cumprod(Y.shape())[-1]
2662 __p = numpy.array(Y).size
2664    # Precomputation of the inversions of B and R
2665 # ----------------------------------
2666 if selfA._parameters["StoreInternalVariables"] \
2667 or selfA._toStore("CostFunctionJ") \
2668 or selfA._toStore("CostFunctionJb") \
2669 or selfA._toStore("CostFunctionJo") \
2670 or selfA._toStore("CurrentOptimum") \
2671 or selfA._toStore("APosterioriCovariance"):
2678 __m = selfA._parameters["NumberOfMembers"]
2679 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2681 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2683 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2685 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2687 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2688 selfA.StoredVariables["Analysis"].store( Xb )
2689 if selfA._toStore("APosterioriCovariance"):
2690 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2693 previousJMinimum = numpy.finfo(float).max
2695 for step in range(duration-1):
2696 if hasattr(Y,"store"):
2697 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2699 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2702 if hasattr(U,"store") and len(U)>1:
2703 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2704 elif hasattr(U,"store") and len(U)==1:
2705 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2707 Un = numpy.asmatrix(numpy.ravel( U )).T
2711 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2712 Xn = CovarianceInflation( Xn,
2713 selfA._parameters["InflationType"],
2714 selfA._parameters["InflationFactor"],
2717 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2718 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2720 returnSerieAsArrayMatrix = True )
2721 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2722 Xn_predicted = EMX + qi
2723            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2724 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2725 Xn_predicted = Xn_predicted + Cm * Un
2726 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2727            # --- > By principle, M = Id, Q = 0
2730 #--------------------------
2731 if VariantM == "MLEF13":
2732 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2733 EaX = EnsembleOfAnomalies( Xn_predicted ) / numpy.sqrt(__m-1)
2739 vw = numpy.zeros(__m)
2740 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2741 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2744 E1 = vx1 + _epsilon * EaX
2746 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2748 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2750 returnSerieAsArrayMatrix = True )
2751 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
2754 EaY = (HE2 - vy2) / _epsilon
2756 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
2758 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2759 mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
2760 Deltaw = - numpy.linalg.solve(mH,GradJ)
2765 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2770 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2772 Xn = vx1 + numpy.sqrt(__m-1) * EaX @ Ta @ Ua
2773 #--------------------------
2775 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2777 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2778 Xn = CovarianceInflation( Xn,
2779 selfA._parameters["InflationType"],
2780 selfA._parameters["InflationFactor"],
2783 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
2784 #--------------------------
2786 if selfA._parameters["StoreInternalVariables"] \
2787 or selfA._toStore("CostFunctionJ") \
2788 or selfA._toStore("CostFunctionJb") \
2789 or selfA._toStore("CostFunctionJo") \
2790 or selfA._toStore("APosterioriCovariance") \
2791 or selfA._toStore("InnovationAtCurrentAnalysis") \
2792 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2793 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2794 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2795 _Innovation = Ynpu - _HXa
2797 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2798        # ---> with analysis
2799 selfA.StoredVariables["Analysis"].store( Xa )
2800 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2801 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2802 if selfA._toStore("InnovationAtCurrentAnalysis"):
2803 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2804        # ---> with current state
2805 if selfA._parameters["StoreInternalVariables"] \
2806 or selfA._toStore("CurrentState"):
2807 selfA.StoredVariables["CurrentState"].store( Xn )
2808 if selfA._toStore("ForecastState"):
2809 selfA.StoredVariables["ForecastState"].store( EMX )
2810 if selfA._toStore("BMA"):
2811 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2812 if selfA._toStore("InnovationAtCurrentState"):
2813 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
2814 if selfA._toStore("SimulatedObservationAtCurrentState") \
2815 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2816 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2818 if selfA._parameters["StoreInternalVariables"] \
2819 or selfA._toStore("CostFunctionJ") \
2820 or selfA._toStore("CostFunctionJb") \
2821 or selfA._toStore("CostFunctionJo") \
2822 or selfA._toStore("CurrentOptimum") \
2823 or selfA._toStore("APosterioriCovariance"):
2824 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2825 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2827 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2828 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2829 selfA.StoredVariables["CostFunctionJ" ].store( J )
2831 if selfA._toStore("IndexOfOptimum") \
2832 or selfA._toStore("CurrentOptimum") \
2833 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2834 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2835 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2836 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2837 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2838 if selfA._toStore("IndexOfOptimum"):
2839 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2840 if selfA._toStore("CurrentOptimum"):
2841 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2842 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2843 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2844 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2845 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2846 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2847 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2848 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2849 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2850 if selfA._toStore("APosterioriCovariance"):
2851 Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
2853 Pn = 0.5 * (Pn + Pn.T)
2854 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2855 if selfA._parameters["EstimationOf"] == "Parameters" \
2856 and J < previousJMinimum:
2857 previousJMinimum = J
2859 if selfA._toStore("APosterioriCovariance"):
2860 covarianceXaMin = Pn
2862    # Additional final storage of the optimum in parameter estimation
2863 # ----------------------------------------------------------------------
2864 if selfA._parameters["EstimationOf"] == "Parameters":
2865 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2866 selfA.StoredVariables["Analysis"].store( XaMin )
2867 if selfA._toStore("APosterioriCovariance"):
2868 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2869 if selfA._toStore("BMA"):
2870 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
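
# Illustrative sketch (not part of the ADAO API): one Gauss-Newton step in
# ensemble space, of the same form as the inner loop of mlef/ienkf above
# ("GradJ", "mH", "Deltaw"); for a linear observation operator a single step
# already zeroes the gradient. Sizes and values are arbitrary.
def _example_ensemble_space_newton_step():
    numpy.random.seed(2)
    __p, __m = 3, 4
    EaY = numpy.random.randn(__p, __m)       # observed ensemble anomalies
    RI  = 2.0 * numpy.eye(__p)               # inverse observation error covariance
    d   = numpy.random.randn(__p, 1)         # innovation
    vw  = numpy.zeros((__m, 1))
    GradJ  = vw - EaY.T @ (RI @ (d - EaY @ vw))
    mH     = numpy.eye(__m) + EaY.T @ (RI @ EaY)
    Deltaw = - numpy.linalg.solve(mH, GradJ)
    vw = vw + Deltaw
    # gradient at the new point (zero in the linear case)
    return numpy.allclose(vw - EaY.T @ (RI @ (d - EaY @ vw)), 0.)
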
2874 # ==============================================================================
2875 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2876 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2880 if selfA._parameters["EstimationOf"] == "Parameters":
2881 selfA._parameters["StoreInternalVariables"] = True
2885 H = HO["Direct"].appliedControledFormTo
2887 if selfA._parameters["EstimationOf"] == "State":
2888 M = EM["Direct"].appliedControledFormTo
2890 if CM is not None and "Tangent" in CM and U is not None:
2891 Cm = CM["Tangent"].asMatrix(Xb)
2895    # Number of steps identical to the number of observation steps
2896 # -------------------------------------------------------
2897 if hasattr(Y,"stepnumber"):
2898 duration = Y.stepnumber()
2899 __p = numpy.cumprod(Y.shape())[-1]
2902 __p = numpy.array(Y).size
2904    # Precomputation of the inversions of B and R
2905 # ----------------------------------
2906 if selfA._parameters["StoreInternalVariables"] \
2907 or selfA._toStore("CostFunctionJ") \
2908 or selfA._toStore("CostFunctionJb") \
2909 or selfA._toStore("CostFunctionJo") \
2910 or selfA._toStore("CurrentOptimum") \
2911 or selfA._toStore("APosterioriCovariance"):
2918 __m = selfA._parameters["NumberOfMembers"]
2919 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2921 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2923 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2925 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2927 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2928 selfA.StoredVariables["Analysis"].store( Xb )
2929 if selfA._toStore("APosterioriCovariance"):
2930 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2933 previousJMinimum = numpy.finfo(float).max
2935 for step in range(duration-1):
2936 if hasattr(Y,"store"):
2937 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,-1))
2939 Ynpu = numpy.ravel( Y ).reshape((__p,-1))
2942 if hasattr(U,"store") and len(U)>1:
2943 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2944 elif hasattr(U,"store") and len(U)==1:
2945 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2947 Un = numpy.asmatrix(numpy.ravel( U )).T
2951 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2952 Xn = CovarianceInflation( Xn,
2953 selfA._parameters["InflationType"],
2954 selfA._parameters["InflationFactor"],
2957 #--------------------------
2958 if VariantM == "IEnKF12":
2959 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2960 EaX = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1)
2965 vw = numpy.zeros(__m)
2966 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2967 vx1 = (Xfm + EaX @ vw).reshape((__n,-1))
2970 E1 = vx1 + _epsilon * EaX
2972 E1 = vx1 + numpy.sqrt(__m-1) * EaX @ Ta
2974 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2975 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2977 returnSerieAsArrayMatrix = True )
2978 elif selfA._parameters["EstimationOf"] == "Parameters":
2979            # --- > By principle, M = Id
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
                vy1 = H((vx2, Un)).reshape((__p,-1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,-1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / numpy.sqrt(__m-1)
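                # Gauss-Newton step in ensemble space:
                #   GradJ  = vw - EaY^T R^{-1} (Ynpu - vy1)   (approximate gradient)
                #   mH     = I_m + EaY^T R^{-1} EaY           (approximate Hessian)
                #   Deltaw = - mH^{-1} GradJ                  (weight increment)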
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.eye(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = numpy.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
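        #
        # Optional multiplicative inflation of the analysis anomalies, meant
        # to compensate the variance underestimation of finite ensembles.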
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,-1))
        #--------------------------
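        #
        # Storage of the requested intermediate and diagnostic quantities
        # for the current analysis step.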
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( E2 )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( E2 - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu.reshape((__p,-1)) )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J  = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            if selfA._toStore("APosterioriCovariance"):
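                # Empirical a posteriori covariance, rebuilt from the analysis
                # ensemble anomalies and symmetrised for numerical safety.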
                Eai = EnsembleOfAnomalies( Xn ) / numpy.sqrt(__m-1) # Anomalies
                Pn = Eai @ Eai.T
                Pn = 0.5 * (Pn + Pn.T)
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Final additional storage of the optimum in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')