# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
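#
# Minimal sketch of how ExecuteFunction is meant to be called by the
# multiprocessing pools used below; the path, module and function names in
# funcrepr are hypothetical, for illustration only:
#
#   funcrepr = {
#       "__userFunction__path" : "/path/to/user/module",  # hypothetical path
#       "__userFunction__modl" : "usermodule",            # hypothetical module
#       "__userFunction__name" : "SomeOperator",          # hypothetical function
#       }
#   HX = ExecuteFunction( (numpy.ones(3), None, funcrepr) )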
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface to define approximated operators. When
    creating an object by providing a function "Function", one obtains an
    object that provides the 3 methods "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD approximation is controlled either with the
    multiplicative increment "increment", 1% by default, or with the fixed
    increment "dX" which is then multiplied by "increment" (hence in %), and
    centered finite differences are used if the boolean "centeredDF" is true.
    """
    def __init__(self,
            name                  = "FDApproximation",
            Function              = None,
            centeredDF            = False,
            increment             = 0.01,
            dX                    = None,
            extraArguments        = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled             = False,
            mpWorkers             = None,
            mfEnabled             = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increment
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Calculs en multiprocessing : FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Calculs en multiprocessing : MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the Direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment = float(increment)
        else:
            self.__increment = 0.01
        if dX is None:
            self.__dX = None
        else:
            self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
                break
        return __ac, __iac
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Computation of the direct value using the provided function.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        logging.debug("FDA Calcul DirectOperator (explicite)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, that is, the gradient of H at X. Directional finite
        differences are used around the point X. X is a numpy.matrix.

        Centered finite differences (second-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The differences (HX_plus_dXi-HX_moins_dXi) are computed and divided
           by (2*dXi)
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (first-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The differences (HX_plus_dXi-HX) are computed and divided by (dXi)
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Début du calcul de la Jacobienne")
        logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
        logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Calcul Jacobienne (explicite)")
            if self.__centeredDF:
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi  = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
            else:
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi           = _dX[i]
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                # Norm of the increment (not of the Jacobian), so that the
                # __doublon__ comparison against _dX stays consistent
                self.__listJPIN.append( numpy.linalg.norm(_dX) )
        #
        logging.debug("FDA Fin du calcul de la Jacobienne")
        #
        return _Jacobienne
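    # Reminder (sketch) of the column formulas assembled above, for each
    # component i with unit vector e_i and step dXi:
    #   centered    : J[:,i] ~ ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
    #   non-centered: J[:,i] ~ ( H(X + dXi*e_i) - H(X) ) / dXi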
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent value using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint value using the Jacobian.

        NB: the extraArgs are present to ensure call compatibility, but they
        must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Computation of the matrix form if the second argument is None
            # -------------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # ---------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
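# Usage sketch (hypothetical operator H), assuming the default single-function,
# single-process mode of FDApproximation:
#
#   def H(x): return numpy.ravel(x)**2            # hypothetical user operator
#   FDA = FDApproximation( Function = H, centeredDF = True )
#   HX  = FDA.DirectOperator( numpy.ones(3) )                   # H(X)
#   J   = FDA.TangentOperator( (numpy.ones(3), None) )          # full Jacobian
#   JtY = FDA.AdjointOperator( (numpy.ones(3), numpy.ones(3)) ) # J^T * Y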
# ==============================================================================
def mmqr(
        func     = None,
        x0       = None,
        fprime   = None,
        bounds   = None,
        quantile = 0.5,
        maxfun   = 15000,
        toler    = 1.e-06,
        y        = None,
        ):
    """
    Computer implementation of the MMQR algorithm, based on the publication:
    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
    """
    #
    # Recovery of the initial data and information
    # --------------------------------------------
    variables = numpy.ravel( x0 )
    mesures   = numpy.ravel( y )
    increment = sys.float_info[0]
    p         = variables.size
    n         = mesures.size
    quantile  = float(quantile)
    #
    # Computation of the MM parameters
    # --------------------------------
    tn      = float(toler) / n
    e0      = -tn / math.log(tn)
    epsilon = (e0-tn)/(1+math.log(e0))
    #
    # Initialization computations
    # ---------------------------
    residus = mesures - numpy.ravel( func( variables ) )
    poids   = 1./(epsilon+numpy.abs(residus))
    veps    = 1. - 2. * quantile - residus * poids
    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
    iteration = 0
    #
    # Iterative search
    # ----------------
    while (increment > toler) and (iteration < maxfun) :
        iteration += 1
        #
        Derivees  = numpy.array(fprime(variables))
        Derivees  = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it went through YACS pipes
        DeriveesT = Derivees.transpose()
        M         = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
        SM        = numpy.transpose(numpy.dot( DeriveesT , veps ))
        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
        #
        variables = variables + step
        if bounds is not None:
            # Warning: infinite loop to be avoided if an interval is too small
            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
                step      = step/2.
                variables = variables - step
        residus   = mesures - numpy.ravel( func(variables) )
        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
            step      = step/2.
            variables = variables - step
            residus   = mesures - numpy.ravel( func(variables) )
            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
        #
        increment     = lastsurrogate-surrogate
        poids         = 1./(epsilon+numpy.abs(residus))
        veps          = 1. - 2. * quantile - residus * poids
        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
    #
    # Discrepancy measure
    # -------------------
    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
    #
    return variables, Ecart, [n,p,iteration,increment,0]
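#
# Usage sketch for mmqr (hypothetical linear model), estimating a conditional
# median (quantile = 0.5); func and fprime below are illustrative assumptions:
#
#   xdata  = numpy.linspace(0., 1., 50)
#   ydata  = 2.*xdata + 1. + numpy.random.normal(0., 0.1, 50)
#   func   = lambda beta: beta[0]*xdata + beta[1]
#   fprime = lambda beta: numpy.vstack((xdata, numpy.ones(50))).T
#   beta, Ecart, infos = mmqr(func=func, x0=[1., 0.], fprime=fprime,
#                             quantile=0.5, y=ydata)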
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers-1 centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
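#
# Example sketch: draw an ensemble of 100 members around a 3-component state,
# with an identity background covariance (values are illustrative only):
#
#   Xens = EnsembleOfCenteredPerturbations( numpy.zeros(3), numpy.eye(3), 100 )
#   # Xens has shape (3, 100), one column per member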
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers-1 centered random states"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following the
        handwritten notes of MB and consistent with the PS code with eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
# ==============================================================================
def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
    "Return the centered anomalies from an ensemble of size StateSize*NbMembers"
    if OptMean is None:
        __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
    else:
        __Em = numpy.ravel(OptMean).reshape((-1,1))
    #
    return Normalisation * (numpy.asarray(Ensemble) - __Em)
# ==============================================================================
def EnsembleErrorCovariance( Ensemble ):
    "Return the ensemble covariance"
    __Anomalies = EnsembleOfAnomalies( Ensemble )
    __n, __m = numpy.asarray(__Anomalies).shape
    __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
    # Ensure symmetry
    __Covariance = (__Covariance + __Covariance.T) * 0.5
    # Ensure positivity
    __epsilon    = mpr*numpy.trace(__Covariance)
    __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
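#
# Example sketch, chaining the ensemble helpers above (illustrative values):
#
#   Xens = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.eye(3), 100 )
#   Xanm = EnsembleOfAnomalies( Xens )      # centered anomalies, shape (3, 100)
#   Pens = EnsembleErrorCovariance( Xens )  # (3, 3) symmetric positive matrix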
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
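#
# Usage sketch (hypothetical background covariance Pb), multiplicative case:
#
#   Pb_inflated = CovarianceInflation( Pb,
#       InflationType   = "MultiplicativeOnBackgroundCovariance",
#       InflationFactor = 1.05 )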
# ==============================================================================
def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
    """
    Multi-step and multi-method 3DVAR
    """
    #
    # Initialization
    # --------------
    Xn = numpy.ravel(Xb).reshape((-1,1))
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedTo
        #
        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
            selfA.StoredVariables["Analysis"].store( Xn )
            if selfA._toStore("APosterioriCovariance"):
                if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
                else:                         Pn = B
                selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn )
    #
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
    else:
        duration = 2
    #
    # Multi-step
    # ----------
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((-1,1))
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast
            Xn = selfA.StoredVariables["Analysis"][-1]
            Xn_predicted = M( Xn )
            if selfA._toStore("ForecastState"):
                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
        #
        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
    #
    return 0
# ==============================================================================
def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Possible use of a precomputed H(Xb) vector
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
    else:
        HXb = Hm( Xb )
    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
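    # The minimization below addresses the classical 3DVAR cost function
    # (sketch, with B and R the background and observation error covariances):
    #   J(x) = 1/2*(x-Xb)'*B^-1*(x-Xb) + 1/2*(Y-H(x))'*R^-1*(Y-H(x))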
    if selfA._toStore("JacobianMatrixAtBackground"):
        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
    #
    # Precomputation of the B and R inverses
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Cost function definition
    # ------------------------
    def CostFunction(x):
        _X  = numpy.asmatrix(numpy.ravel( x )).T
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(x):
        _X      = numpy.asmatrix(numpy.ravel( x )).T
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BI * (_X - Xb)
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # Verification only happens in debug
            try:
                L = numpy.linalg.cholesky( A )
            except numpy.linalg.LinAlgError:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations
    # ---------------
    #
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precomputation of the B and R matrices used below
    BI = B.getI()
    RI = R.getI()
    BT = B.getT()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
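    # Change-of-variable sketch: the state is parameterized as X = Xb + B*v,
    # so the background term needs B itself rather than its inverse:
    #   J(v) = 1/2*v'*B*v + 1/2*(Y-H(Xb+B*v))'*R^-1*(Y-H(Xb+B*v))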
    #
    # Cost function definition
    # ------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(v):
        _V      = numpy.asmatrix(numpy.ravel( v )).T
        _X      = Xb + B * _V
        _HX     = Hm( _X )
        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb  = BT * _V
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost function
    # ---------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # ------------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despite the fact that it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # Verification only happens in debug
            try:
                L = numpy.linalg.cholesky( A )
            except numpy.linalg.LinAlgError:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precomputation of the B and R inverses
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    # ----------
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Update of the tangent operator around the current point Xr
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
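        # Incremental sketch: each outer iteration linearizes H around Xr via
        # the tangent matrix Ht, then minimizes the quadratic inner cost in dx:
        #   J(dx) = 1/2*dx'*B^-1*dx + 1/2*(d-Ht*dx)'*R^-1*(d-Ht*dx), d = Y - H(Xb)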
        #
        # Cost function definition
        # ------------------------
        def CostFunction(dx):
            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht @ _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        def GradientOfCostFunction(dx):
            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX         = Ht @ _dX
            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb       = BI * _dX
            GradJo       = - Ht.T @ (RI * _dInnovation)
            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the cost function
        # ---------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        #
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
        #
        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
        #
        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
        else:
            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
        #
        Xr     = Minimum
        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
        J      = selfA.StoredVariables["CostFunctionJ" ][-1]
    #
    # Obtaining the analysis
    # ----------------------
    Xa = Xr
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
1489 if min(A.shape) != max(A.shape):
1490 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1491 if (numpy.diag(A) < 0).any():
1492 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1493 if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
1495 L = numpy.linalg.cholesky( A )
1497 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1498 if selfA._toStore("APosterioriCovariance"):
1499 selfA.StoredVariables["APosterioriCovariance"].store( A )
1500 if selfA._toStore("JacobianMatrixAtOptimum"):
1501 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1502 if selfA._toStore("KalmanGainAtOptimum"):
1503 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1504 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1505 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
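# Note added for reference (standard equivalences, not specific to ADAO):
# the two branches above implement the equivalent primal and dual forms of
# the Kalman gain,
#   KG = B HaM (R + HtM B HaM)^-1     (inverse taken in observation space)
#   KG = (BI + HaM RI HtM)^-1 HaM RI  (inverse taken in state space)
# and the test on Y.size versus Xb.size picks the form whose inverted
# matrix is the smaller of the two.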
1507 # Additional calculations and/or storage
1508 # ---------------------------------------
1509 if selfA._toStore("Innovation") or \
1510 selfA._toStore("SigmaObs2") or \
1511 selfA._toStore("MahalanobisConsistency") or \
1512 selfA._toStore("OMB"):
1514 if selfA._toStore("Innovation"):
1515 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1516 if selfA._toStore("BMA"):
1517 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1518 if selfA._toStore("OMA"):
1519 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1520 if selfA._toStore("OMB"):
1521 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1522 if selfA._toStore("SigmaObs2"):
1523 TraceR = R.trace(Y.size)
1524 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1525 if selfA._toStore("MahalanobisConsistency"):
1526 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
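# Note added for reference: for consistent B and R in the linear Gaussian
# setting, the expected value of 2*J(xa) equals the number of observations,
# so the ratio stored above should be close to 1; large departures suggest
# mis-specified error covariances.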
1527 if selfA._toStore("SimulationQuantiles"):
1528 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1529 HXa = numpy.matrix(numpy.ravel( HXa )).T
1531 for i in range(nech):
1532 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1533 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1534 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1536 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1537 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1538 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1542 YfQ = numpy.hstack((YfQ,Yr))
1545 for quantile in selfA._parameters["Quantiles"]:
1546 if not (0. <= float(quantile) <= 1.): continue
1547 indice = int(nech * float(quantile) - 1./nech)
1548 if YQ is None: YQ = YfQ[:,indice]
1549 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1550 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
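# Worked example added for illustration (hypothetical values): with
# nech = 100 and quantile = 0.95, indice = int(100*0.95 - 1./100) = 94,
# which selects the 95th column (0-based index 94) of the sorted ensemble
# of simulated observations YfQ.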
1551 if selfA._toStore("SimulatedObservationAtBackground"):
1552 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1553 if selfA._toStore("SimulatedObservationAtOptimum"):
1554 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1558 # ==============================================================================
1559 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1568 Hm = HO["Direct"].appliedTo
1570 # Possible use of a precomputed H(Xb) vector
1571 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1572 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1575 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1576 if Y.size != HXb.size:
1577 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
1578 if max(Y.shape) != max(HXb.shape):
1579 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
1581 if selfA._toStore("JacobianMatrixAtBackground"):
1582 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
1583 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
1584 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
1586 Ht = HO["Tangent"].asMatrix(Xb)
1588 HBHTpR = R + Ht * BHT
1589 Innovation = Y - HXb
1591 # Starting point of the optimization
1592 Xini = numpy.zeros(Xb.shape)
1594 # Definition of the cost function
1595 # ------------------------------
1596 def CostFunction(w):
1597 _W = numpy.asmatrix(numpy.ravel( w )).T
1598 if selfA._parameters["StoreInternalVariables"] or \
1599 selfA._toStore("CurrentState") or \
1600 selfA._toStore("CurrentOptimum"):
1601 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
1602 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1603 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1604 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
1605 if selfA._toStore("InnovationAtCurrentState"):
1606 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
1608 Jb = float( 0.5 * _W.T * HBHTpR * _W )
1609 Jo = float( - _W.T * Innovation )
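# Note added for reference: in this PSAS dual formulation the complete
# cost function is J(w) = 0.5*w'*(H B H' + R)*w - w'*d, minimized over the
# observation-space variable w rather than over the state x.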
1612 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1613 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1614 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1615 selfA.StoredVariables["CostFunctionJ" ].store( J )
1616 if selfA._toStore("IndexOfOptimum") or \
1617 selfA._toStore("CurrentOptimum") or \
1618 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1619 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1620 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1621 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1622 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1623 if selfA._toStore("IndexOfOptimum"):
1624 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1625 if selfA._toStore("CurrentOptimum"):
1626 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1627 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1628 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1629 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1630 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1631 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1632 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1633 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1634 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1637 def GradientOfCostFunction(w):
1638 _W = numpy.asmatrix(numpy.ravel( w )).T
1639 GradJb = HBHTpR * _W
1640 GradJo = - Innovation
1641 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
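# Note added for reference: the gradient is GradJ(w) = (H B H' + R)*w - d,
# so the first-order optimality condition recovers the usual PSAS linear
# system (H B H' + R)*w = d.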
1644 # Minimization of the functional
1645 # --------------------------------
1646 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1648 if selfA._parameters["Minimizer"] == "LBFGSB":
1649 if "0.19" <= scipy.version.version <= "1.1.0":
1650 import lbfgsbhlt as optimiseur
1652 import scipy.optimize as optimiseur
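# Note added as a caveat: the version test above is a lexicographic string
# comparison, not a true version comparison, and is only guaranteed for
# the dotted version formats it was written against.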
1653 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1654 func = CostFunction,
1656 fprime = GradientOfCostFunction,
1658 bounds = selfA._parameters["Bounds"],
1659 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1660 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1661 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1662 iprint = selfA._parameters["optiprint"],
1664 nfeval = Informations['funcalls']
1665 rc = Informations['warnflag']
1666 elif selfA._parameters["Minimizer"] == "TNC":
1667 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1668 func = CostFunction,
1670 fprime = GradientOfCostFunction,
1672 bounds = selfA._parameters["Bounds"],
1673 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1674 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1675 ftol = selfA._parameters["CostDecrementTolerance"],
1676 messages = selfA._parameters["optmessages"],
1678 elif selfA._parameters["Minimizer"] == "CG":
1679 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1682 fprime = GradientOfCostFunction,
1684 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1685 gtol = selfA._parameters["GradientNormTolerance"],
1686 disp = selfA._parameters["optdisp"],
1689 elif selfA._parameters["Minimizer"] == "NCG":
1690 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1693 fprime = GradientOfCostFunction,
1695 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1696 avextol = selfA._parameters["CostDecrementTolerance"],
1697 disp = selfA._parameters["optdisp"],
1700 elif selfA._parameters["Minimizer"] == "BFGS":
1701 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1704 fprime = GradientOfCostFunction,
1706 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1707 gtol = selfA._parameters["GradientNormTolerance"],
1708 disp = selfA._parameters["optdisp"],
1712 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1714 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1715 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1717 # Workaround for a TNC bug affecting the returned Minimum
1718 # ----------------------------------------------------------------
1719 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1720 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1721 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1723 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
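# Note added for reference: when the optimizer's raw return is used, the
# dual optimum w* is mapped back to state space just above through
# Xa = Xb + B H' w*, i.e. the PSAS change of variables.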
1725 # Retrieval of the analysis
1726 # ----------------------
1729 selfA.StoredVariables["Analysis"].store( Xa )
1731 if selfA._toStore("OMA") or \
1732 selfA._toStore("SigmaObs2") or \
1733 selfA._toStore("SimulationQuantiles") or \
1734 selfA._toStore("SimulatedObservationAtOptimum"):
1735 if selfA._toStore("SimulatedObservationAtCurrentState"):
1736 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1737 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1738 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1742 # Computation of the analysis covariance
1743 # ---------------------------------
1744 if selfA._toStore("APosterioriCovariance") or \
1745 selfA._toStore("SimulationQuantiles") or \
1746 selfA._toStore("JacobianMatrixAtOptimum") or \
1747 selfA._toStore("KalmanGainAtOptimum"):
1748 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1749 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1750 if selfA._toStore("APosterioriCovariance") or \
1751 selfA._toStore("SimulationQuantiles") or \
1752 selfA._toStore("KalmanGainAtOptimum"):
1753 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1754 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1755 if selfA._toStore("APosterioriCovariance") or \
1756 selfA._toStore("SimulationQuantiles"):
1762 _ee = numpy.matrix(numpy.zeros(nb)).T
1764 _HtEE = numpy.dot(HtM,_ee)
1765 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1766 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1767 HessienneI = numpy.matrix( HessienneI )
1769 if min(A.shape) != max(A.shape):
1770 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator; please check it."%(selfA._name,str(A.shape)))
1771 if (numpy.diag(A) < 0).any():
1772 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator; please check it."%(selfA._name,))
1773 if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
1775 L = numpy.linalg.cholesky( A )
1777 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1778 if selfA._toStore("APosterioriCovariance"):
1779 selfA.StoredVariables["APosterioriCovariance"].store( A )
1780 if selfA._toStore("JacobianMatrixAtOptimum"):
1781 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1782 if selfA._toStore("KalmanGainAtOptimum"):
1783 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1784 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1785 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1787 # Additional calculations and/or storage
1788 # ---------------------------------------
1789 if selfA._toStore("Innovation") or \
1790 selfA._toStore("SigmaObs2") or \
1791 selfA._toStore("MahalanobisConsistency") or \
1792 selfA._toStore("OMB"):
1794 if selfA._toStore("Innovation"):
1795 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1796 if selfA._toStore("BMA"):
1797 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1798 if selfA._toStore("OMA"):
1799 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1800 if selfA._toStore("OMB"):
1801 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1802 if selfA._toStore("SigmaObs2"):
1803 TraceR = R.trace(Y.size)
1804 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1805 if selfA._toStore("MahalanobisConsistency"):
1806 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1807 if selfA._toStore("SimulationQuantiles"):
1808 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1809 HXa = numpy.matrix(numpy.ravel( HXa )).T
1811 for i in range(nech):
1812 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1813 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1814 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1816 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1817 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1818 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1822 YfQ = numpy.hstack((YfQ,Yr))
1825 for quantile in selfA._parameters["Quantiles"]:
1826 if not (0. <= float(quantile) <= 1.): continue
1827 indice = int(nech * float(quantile) - 1./nech)
1828 if YQ is None: YQ = YfQ[:,indice]
1829 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1830 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1831 if selfA._toStore("SimulatedObservationAtBackground"):
1832 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1833 if selfA._toStore("SimulatedObservationAtOptimum"):
1834 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1838 # ==============================================================================
1839 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1848 Hm = HO["Direct"].appliedControledFormTo
1849 Mm = EM["Direct"].appliedControledFormTo
1851 if CM is not None and "Tangent" in CM and U is not None:
1852 Cm = CM["Tangent"].asMatrix(Xb)
1858 if hasattr(U,"store") and 1<=_step<len(U) :
1859 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
1860 elif hasattr(U,"store") and len(U)==1:
1861 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1863 _Un = numpy.asmatrix(numpy.ravel( U )).T
1868 if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is applied twice!
1869 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
1875 # Remark: the observations are used starting from time step number 1,
1876 # and they are stored in Yo indexed accordingly. Hence step 0 is not
1877 # used, since the first assimilation step starts with the observation
1878 # of step 1.
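# Illustrative example added (not from the original code): with
# duration = 3, the cost function below propagates the state from step 0
# and compares it with the observations Y[1] and Y[2]; Y[0] is never used.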
1880 # Number of time steps identical to the number of observation steps
1881 if hasattr(Y,"stepnumber"):
1882 duration = Y.stepnumber()
1886 # Precomputation of the inverses of B and R
1890 # Starting point of the optimization
1891 Xini = selfA._parameters["InitializationPoint"]
1893 # Definition of the cost function
1894 # ------------------------------
1895 selfA.DirectCalculation = [None,] # Step 0 is not observed
1896 selfA.DirectInnovation = [None,] # Step 0 is not observed
1897 def CostFunction(x):
1898 _X = numpy.asmatrix(numpy.ravel( x )).T
1899 if selfA._parameters["StoreInternalVariables"] or \
1900 selfA._toStore("CurrentState") or \
1901 selfA._toStore("CurrentOptimum"):
1902 selfA.StoredVariables["CurrentState"].store( _X )
1903 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
1904 selfA.DirectCalculation = [None,]
1905 selfA.DirectInnovation = [None,]
1908 for step in range(0,duration-1):
1909 if hasattr(Y,"store"):
1910 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
1912 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
1916 if selfA._parameters["EstimationOf"] == "State":
1917 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
1918 elif selfA._parameters["EstimationOf"] == "Parameters":
1921 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1922 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
1923 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
1925 # Difference with the observations
1926 if selfA._parameters["EstimationOf"] == "State":
1927 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
1928 elif selfA._parameters["EstimationOf"] == "Parameters":
1929 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
1931 # Storage of the state
1932 selfA.DirectCalculation.append( _Xn )
1933 selfA.DirectInnovation.append( _YmHMX )
1935 # Accumulation into the observation functional
1936 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
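# Note added for reference: the complete strong-constraint 4DVAR cost is
# J(x) = 0.5*(x-Xb)'*BI*(x-Xb)
#        + 0.5 * Sum_k (Y_k - H(M_k(x)))' * RI * (Y_k - H(M_k(x))),
# accumulated here term by term along the forward model integration.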
1939 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1940 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1941 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1942 selfA.StoredVariables["CostFunctionJ" ].store( J )
1943 if selfA._toStore("IndexOfOptimum") or \
1944 selfA._toStore("CurrentOptimum") or \
1945 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1946 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1947 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1948 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1949 if selfA._toStore("IndexOfOptimum"):
1950 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1951 if selfA._toStore("CurrentOptimum"):
1952 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1953 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1954 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1955 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1956 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1957 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1958 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1961 def GradientOfCostFunction(x):
1962 _X = numpy.asmatrix(numpy.ravel( x )).T
1963 GradJb = BI * (_X - Xb)
1965 for step in range(duration-1,0,-1):
1966 # Retrieve the last stored evolution state
1967 _Xn = selfA.DirectCalculation.pop()
1968 # Retrieve the last stored innovation
1969 _YmHMX = selfA.DirectInnovation.pop()
1970 # Computation of the adjoints
1971 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1972 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
1973 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
1974 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
1975 # Gradient computation by the adjoint state
1976 GradJo = GradJo + Ha * RI * _YmHMX # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
1977 GradJo = Ma * GradJo # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
1978 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
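# Note added for reference: this backward sweep is the classical adjoint
# recursion GradJo <- Ma @ (GradJo + Ha @ RI @ _YmHMX); the final gradient
# is GradJb - GradJo because _YmHMX is the innovation Y - H(M(x)), whose
# derivative with respect to x carries a minus sign.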
1981 # Minimization of the functional
1982 # --------------------------------
1983 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1985 if selfA._parameters["Minimizer"] == "LBFGSB":
1986 if "0.19" <= scipy.version.version <= "1.1.0":
1987 import lbfgsbhlt as optimiseur
1989 import scipy.optimize as optimiseur
1990 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1991 func = CostFunction,
1993 fprime = GradientOfCostFunction,
1995 bounds = selfA._parameters["Bounds"],
1996 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1997 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1998 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1999 iprint = selfA._parameters["optiprint"],
2001 nfeval = Informations['funcalls']
2002 rc = Informations['warnflag']
2003 elif selfA._parameters["Minimizer"] == "TNC":
2004 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2005 func = CostFunction,
2007 fprime = GradientOfCostFunction,
2009 bounds = selfA._parameters["Bounds"],
2010 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2011 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2012 ftol = selfA._parameters["CostDecrementTolerance"],
2013 messages = selfA._parameters["optmessages"],
2015 elif selfA._parameters["Minimizer"] == "CG":
2016 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2019 fprime = GradientOfCostFunction,
2021 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2022 gtol = selfA._parameters["GradientNormTolerance"],
2023 disp = selfA._parameters["optdisp"],
2026 elif selfA._parameters["Minimizer"] == "NCG":
2027 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2030 fprime = GradientOfCostFunction,
2032 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2033 avextol = selfA._parameters["CostDecrementTolerance"],
2034 disp = selfA._parameters["optdisp"],
2037 elif selfA._parameters["Minimizer"] == "BFGS":
2038 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2041 fprime = GradientOfCostFunction,
2043 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2044 gtol = selfA._parameters["GradientNormTolerance"],
2045 disp = selfA._parameters["optdisp"],
2049 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2051 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2052 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2054 # Workaround for a TNC bug affecting the returned Minimum
2055 # ----------------------------------------------------------------
2056 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2057 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2059 # Retrieval of the analysis
2060 # ----------------------
2061 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2063 selfA.StoredVariables["Analysis"].store( Xa )
2065 # Additional calculations and/or storage
2066 # ---------------------------------------
2067 if selfA._toStore("BMA"):
2068 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2072 # ==============================================================================
2073 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2077 if selfA._parameters["EstimationOf"] == "Parameters":
2078 selfA._parameters["StoreInternalVariables"] = True
2082 H = HO["Direct"].appliedControledFormTo
2084 if selfA._parameters["EstimationOf"] == "State":
2085 M = EM["Direct"].appliedControledFormTo
2087 if CM is not None and "Tangent" in CM and U is not None:
2088 Cm = CM["Tangent"].asMatrix(Xb)
2092 # Number of time steps identical to the number of observation steps
2093 # -------------------------------------------------------
2094 if hasattr(Y,"stepnumber"):
2095 duration = Y.stepnumber()
2096 __p = numpy.cumprod(Y.shape())[-1]
2099 __p = numpy.array(Y).size
2101 # Precomputation of the inverses of B and R
2102 # ----------------------------------
2103 if selfA._parameters["StoreInternalVariables"] \
2104 or selfA._toStore("CostFunctionJ") \
2105 or selfA._toStore("CostFunctionJb") \
2106 or selfA._toStore("CostFunctionJo") \
2107 or selfA._toStore("CurrentOptimum") \
2108 or selfA._toStore("APosterioriCovariance"):
2115 __m = selfA._parameters["NumberOfMembers"]
2116 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2118 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2120 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2122 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2124 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2125 selfA.StoredVariables["Analysis"].store( Xb )
2126 if selfA._toStore("APosterioriCovariance"):
2127 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2130 previousJMinimum = numpy.finfo(float).max
2132 for step in range(duration-1):
2133 if hasattr(Y,"store"):
2134 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2136 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2139 if hasattr(U,"store") and len(U)>1:
2140 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2141 elif hasattr(U,"store") and len(U)==1:
2142 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2144 Un = numpy.asmatrix(numpy.ravel( U )).T
2148 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2149 Xn = CovarianceInflation( Xn,
2150 selfA._parameters["InflationType"],
2151 selfA._parameters["InflationFactor"],
2154 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2155 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2157 returnSerieAsArrayMatrix = True )
2158 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2159 Xn_predicted = EMX + qi
2160 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2162 returnSerieAsArrayMatrix = True )
2163 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
2164 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2165 Xn_predicted = Xn_predicted + Cm * Un
2166 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2167 # --- > By principle, M = Id, Q = 0
2169 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2171 returnSerieAsArrayMatrix = True )
2173 # Mean of forecast and observation of forecast
2174 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2175 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2177 #--------------------------
2178 if VariantM == "KalmanFilterFormula05":
2179 PfHT, HPfHT = 0., 0.
2180 for i in range(__m):
2181 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2182 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2183 PfHT += Exfi * Eyfi.T
2184 HPfHT += Eyfi * Eyfi.T
2185 PfHT = (1./(__m-1)) * PfHT
2186 HPfHT = (1./(__m-1)) * HPfHT
2187 Kn = PfHT * ( R + HPfHT ).I
2190 for i in range(__m):
2191 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2192 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
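# Note added for reference: this is the classical stochastic (perturbed
# observations) EnKF analysis, usually attributed to Burgers et al. (1998):
# each member is updated with its own perturbed innovation
# Ynpu + ri - H(xf_i), ri ~ N(0, Rn), which keeps the analysis ensemble
# spread statistically consistent with the observation error R.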
2193 #--------------------------
2194 elif VariantM == "KalmanFilterFormula16":
2195 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2196 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2198 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2199 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2201 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2203 for i in range(__m):
2204 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2205 #--------------------------
2207 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2209 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2210 Xn = CovarianceInflation( Xn,
2211 selfA._parameters["InflationType"],
2212 selfA._parameters["InflationFactor"],
2215 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2216 #--------------------------
2218 if selfA._parameters["StoreInternalVariables"] \
2219 or selfA._toStore("CostFunctionJ") \
2220 or selfA._toStore("CostFunctionJb") \
2221 or selfA._toStore("CostFunctionJo") \
2222 or selfA._toStore("APosterioriCovariance") \
2223 or selfA._toStore("InnovationAtCurrentAnalysis") \
2224 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2225 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2226 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2227 _Innovation = Ynpu - _HXa
2229 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2230 # ---> with the analysis
2231 selfA.StoredVariables["Analysis"].store( Xa )
2232 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2233 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2234 if selfA._toStore("InnovationAtCurrentAnalysis"):
2235 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2236 # ---> with the current state
2237 if selfA._parameters["StoreInternalVariables"] \
2238 or selfA._toStore("CurrentState"):
2239 selfA.StoredVariables["CurrentState"].store( Xn )
2240 if selfA._toStore("ForecastState"):
2241 selfA.StoredVariables["ForecastState"].store( EMX )
2242 if selfA._toStore("BMA"):
2243 selfA.StoredVariables["BMA"].store( EMX - Xa )
2244 if selfA._toStore("InnovationAtCurrentState"):
2245 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2246 if selfA._toStore("SimulatedObservationAtCurrentState") \
2247 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2248 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2250 if selfA._parameters["StoreInternalVariables"] \
2251 or selfA._toStore("CostFunctionJ") \
2252 or selfA._toStore("CostFunctionJb") \
2253 or selfA._toStore("CostFunctionJo") \
2254 or selfA._toStore("CurrentOptimum") \
2255 or selfA._toStore("APosterioriCovariance"):
2256 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2257 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2259 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2260 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2261 selfA.StoredVariables["CostFunctionJ" ].store( J )
2263 if selfA._toStore("IndexOfOptimum") \
2264 or selfA._toStore("CurrentOptimum") \
2265 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2266 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2267 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2268 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2269 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2270 if selfA._toStore("IndexOfOptimum"):
2271 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2272 if selfA._toStore("CurrentOptimum"):
2273 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2274 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2276 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2277 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2278 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2279 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2280 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2281 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2282 if selfA._toStore("APosterioriCovariance"):
2283 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2284 if selfA._parameters["EstimationOf"] == "Parameters" \
2285 and J < previousJMinimum:
2286 previousJMinimum = J
2288 if selfA._toStore("APosterioriCovariance"):
2289 covarianceXaMin = Pn
2291 # Final additional storage of the optimum in parameter estimation
2292 # ----------------------------------------------------------------------
2293 if selfA._parameters["EstimationOf"] == "Parameters":
2294 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2295 selfA.StoredVariables["Analysis"].store( XaMin )
2296 if selfA._toStore("APosterioriCovariance"):
2297 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2298 if selfA._toStore("BMA"):
2299 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2303 # ==============================================================================
2304 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2306 Ensemble-Transform EnKF
2308 if selfA._parameters["EstimationOf"] == "Parameters":
2309 selfA._parameters["StoreInternalVariables"] = True
2313 H = HO["Direct"].appliedControledFormTo
2315 if selfA._parameters["EstimationOf"] == "State":
2316 M = EM["Direct"].appliedControledFormTo
2318 if CM is not None and "Tangent" in CM and U is not None:
2319 Cm = CM["Tangent"].asMatrix(Xb)
2323 # Number of time steps identical to the number of observation steps
2324 # -------------------------------------------------------
2325 if hasattr(Y,"stepnumber"):
2326 duration = Y.stepnumber()
2327 __p = numpy.cumprod(Y.shape())[-1]
2330 __p = numpy.array(Y).size
2332 # Precomputation of the inverses of B and R
2333 # ----------------------------------
2334 if selfA._parameters["StoreInternalVariables"] \
2335 or selfA._toStore("CostFunctionJ") \
2336 or selfA._toStore("CostFunctionJb") \
2337 or selfA._toStore("CostFunctionJo") \
2338 or selfA._toStore("CurrentOptimum") \
2339 or selfA._toStore("APosterioriCovariance"):
2342 elif VariantM != "KalmanFilterFormula":
2344 if VariantM == "KalmanFilterFormula":
2350 __m = selfA._parameters["NumberOfMembers"]
2351 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2353 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2355 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2357 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2358 #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2360 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2361 selfA.StoredVariables["Analysis"].store( Xb )
2362 if selfA._toStore("APosterioriCovariance"):
2363 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2366 previousJMinimum = numpy.finfo(float).max
2368 for step in range(duration-1):
2369 if hasattr(Y,"store"):
2370 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2372 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2375 if hasattr(U,"store") and len(U)>1:
2376 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2377 elif hasattr(U,"store") and len(U)==1:
2378 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2380 Un = numpy.asmatrix(numpy.ravel( U )).T
2384 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2385 Xn = CovarianceInflation( Xn,
2386 selfA._parameters["InflationType"],
2387 selfA._parameters["InflationFactor"],
2390 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2391 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2393 returnSerieAsArrayMatrix = True )
2394 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2395 Xn_predicted = EMX + qi
2396 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2398 returnSerieAsArrayMatrix = True )
2399 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
2400 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2401 Xn_predicted = Xn_predicted + Cm * Un
2402 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2403 # --- > By principle, M = Id, Q = 0
2405 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2407 returnSerieAsArrayMatrix = True )
2409 # Mean of forecast and observation of forecast
2410 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2411 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2414 EaX = EnsembleOfAnomalies( Xn_predicted )
2415 EaHX = numpy.array(HX_predicted - Hfm)
2417 #--------------------------
2418 if VariantM == "KalmanFilterFormula":
2419 mS = RIdemi * EaHX / math.sqrt(__m-1)
2420 delta = RIdemi * ( Ynpu - Hfm )
2421 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
2422 vw = mT @ mS.T @ delta
2424 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
2425 mU = numpy.identity(__m)
2427 EaX = EaX / math.sqrt(__m-1)
2428 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
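# Note added for reference: this is the deterministic ETKF transform,
# Xn = Xfm + EaX @ ( vw + sqrt(m-1) * T^(1/2) @ mU ) with
# T = (I + S'S)^-1 and S = RIdemi @ EaHX / sqrt(m-1); the analysis
# anomalies are obtained without perturbing the observations, and
# mU = I means no additional random rotation is applied.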
2429 #--------------------------
2430 elif VariantM == "Variational":
2431 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2432 def CostFunction(w):
2433 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2434 _Jo = 0.5 * _A.T @ (RI * _A)
2435 _Jb = 0.5 * (__m-1) * w.T @ w
2438 def GradientOfCostFunction(w):
2439 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2440 _GradJo = - EaHX.T @ (RI * _A)
2441 _GradJb = (__m-1) * w.reshape((__m,1))
2442 _GradJ = _GradJo + _GradJb
2443 return numpy.ravel(_GradJ)
2444 vw = scipy.optimize.fmin_cg(
2446 x0 = numpy.zeros(__m),
2447 fprime = GradientOfCostFunction,
2452 Hto = EaHX.T @ (RI * EaHX)
2453 Htb = (__m-1) * numpy.identity(__m)
2456 Pta = numpy.linalg.inv( Hta )
2457 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
2459 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2460 #--------------------------
2461 elif VariantM == "FiniteSize11": # Boc2011 gauge
2462 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2463 def CostFunction(w):
2464 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2465 _Jo = 0.5 * _A.T @ (RI * _A)
2466 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
2469 def GradientOfCostFunction(w):
2470 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2471 _GradJo = - EaHX.T @ (RI * _A)
2472 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2473 _GradJ = _GradJo + _GradJb
2474 return numpy.ravel(_GradJ)
2475 vw = scipy.optimize.fmin_cg(
2477 x0 = numpy.zeros(__m),
2478 fprime = GradientOfCostFunction,
2483 Hto = EaHX.T @ (RI * EaHX)
2485 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
2486 / (1 + 1/__m + vw.T @ vw)**2
2489 Pta = numpy.linalg.inv( Hta )
2490 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
2492 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2493 #--------------------------
2494 elif VariantM == "FiniteSize15": # Boc2015 gauge
2495 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2496 def CostFunction(w):
2497 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2498 _Jo = 0.5 * _A.T @ (RI * _A)
2499 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
2502 def GradientOfCostFunction(w):
2503 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2504 _GradJo = - EaHX.T @ (RI * _A)
2505 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
2506 _GradJ = _GradJo + _GradJb
2507 return numpy.ravel(_GradJ)
2508 vw = scipy.optimize.fmin_cg(
2510 x0 = numpy.zeros(__m),
2511 fprime = GradientOfCostFunction,
2516 Hto = EaHX.T @ (RI * EaHX)
2518 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
2519 / (1 + 1/__m + vw.T @ vw)**2
2522 Pta = numpy.linalg.inv( Hta )
2523 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
2525 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
2526 #--------------------------
2527 elif VariantM == "FiniteSize16": # Boc2016 gauge
2528 HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
2529 def CostFunction(w):
2530 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2531 _Jo = 0.5 * _A.T @ (RI * _A)
2532 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
2535 def GradientOfCostFunction(w):
2536 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
2537 _GradJo = - EaHX.T @ (RI * _A)
2538 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
2539 _GradJ = _GradJo + _GradJb
2540 return numpy.ravel(_GradJ)
2541 vw = scipy.optimize.fmin_cg(
2543 x0 = numpy.zeros(__m),
2544 fprime = GradientOfCostFunction,
2549 Hto = EaHX.T @ (RI * EaHX)
2550 Htb = ((__m+1) / (__m-1)) * \
2551 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
2552 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
2555 Pta = numpy.linalg.inv( Hta )
2556 EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 1e-18
2558 Xn = Xfm + EaX @ (vw[:,None] + EWa)
2559 #--------------------------
2561 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2563 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2564 Xn = CovarianceInflation( Xn,
2565 selfA._parameters["InflationType"],
2566 selfA._parameters["InflationFactor"],
2569 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2570 #--------------------------
2572 if selfA._parameters["StoreInternalVariables"] \
2573 or selfA._toStore("CostFunctionJ") \
2574 or selfA._toStore("CostFunctionJb") \
2575 or selfA._toStore("CostFunctionJo") \
2576 or selfA._toStore("APosterioriCovariance") \
2577 or selfA._toStore("InnovationAtCurrentAnalysis") \
2578 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2579 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2580 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2581 _Innovation = Ynpu - _HXa
2583 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2584 # ---> with the analysis
2585 selfA.StoredVariables["Analysis"].store( Xa )
2586 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2587 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2588 if selfA._toStore("InnovationAtCurrentAnalysis"):
2589 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2590 # ---> with the current state
2591 if selfA._parameters["StoreInternalVariables"] \
2592 or selfA._toStore("CurrentState"):
2593 selfA.StoredVariables["CurrentState"].store( Xn )
2594 if selfA._toStore("ForecastState"):
2595 selfA.StoredVariables["ForecastState"].store( EMX )
2596 if selfA._toStore("BMA"):
2597 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
2598 if selfA._toStore("InnovationAtCurrentState"):
2599 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2600 if selfA._toStore("SimulatedObservationAtCurrentState") \
2601 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2602 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2604 if selfA._parameters["StoreInternalVariables"] \
2605 or selfA._toStore("CostFunctionJ") \
2606 or selfA._toStore("CostFunctionJb") \
2607 or selfA._toStore("CostFunctionJo") \
2608 or selfA._toStore("CurrentOptimum") \
2609 or selfA._toStore("APosterioriCovariance"):
2610 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2611 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2613 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2614 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2615 selfA.StoredVariables["CostFunctionJ" ].store( J )
2617 if selfA._toStore("IndexOfOptimum") \
2618 or selfA._toStore("CurrentOptimum") \
2619 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2620 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2621 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2622 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2623 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2624 if selfA._toStore("IndexOfOptimum"):
2625 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2626 if selfA._toStore("CurrentOptimum"):
2627 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2628 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2629 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2630 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2631 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2632 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2633 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2634 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2635 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2636 if selfA._toStore("APosterioriCovariance"):
2637 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2638 if selfA._parameters["EstimationOf"] == "Parameters" \
2639 and J < previousJMinimum:
2640 previousJMinimum = J
2642 if selfA._toStore("APosterioriCovariance"):
2643 covarianceXaMin = Pn
2644 # ---> For the smoothers
2645 if selfA._toStore("CurrentEnsembleState"):
2646 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2647 if selfA._toStore("LastEnsembleForecastState"):
2648 selfA.StoredVariables["LastEnsembleForecastState"].store( EMX )
2650 # Final additional storage of the optimum in parameter estimation
2651 # ----------------------------------------------------------------------
2652 if selfA._parameters["EstimationOf"] == "Parameters":
2653 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2654 selfA.StoredVariables["Analysis"].store( XaMin )
2655 if selfA._toStore("APosterioriCovariance"):
2656 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2657 if selfA._toStore("BMA"):
2658 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2662 # ==============================================================================
2663 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
2664 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2666 Maximum Likelihood Ensemble Filter
2668 if selfA._parameters["EstimationOf"] == "Parameters":
2669 selfA._parameters["StoreInternalVariables"] = True
2673 H = HO["Direct"].appliedControledFormTo
2675 if selfA._parameters["EstimationOf"] == "State":
2676 M = EM["Direct"].appliedControledFormTo
2678 if CM is not None and "Tangent" in CM and U is not None:
2679 Cm = CM["Tangent"].asMatrix(Xb)
2683 # Number of time steps identical to the number of observation steps
2684 # -------------------------------------------------------
2685 if hasattr(Y,"stepnumber"):
2686 duration = Y.stepnumber()
2687 __p = numpy.cumprod(Y.shape())[-1]
2690 __p = numpy.array(Y).size
2692 # Precomputation of the inverses of B and R
2693 # ----------------------------------
2694 if selfA._parameters["StoreInternalVariables"] \
2695 or selfA._toStore("CostFunctionJ") \
2696 or selfA._toStore("CostFunctionJb") \
2697 or selfA._toStore("CostFunctionJo") \
2698 or selfA._toStore("CurrentOptimum") \
2699 or selfA._toStore("APosterioriCovariance"):
2706 __m = selfA._parameters["NumberOfMembers"]
2707 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2709 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2711 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2713 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2715 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2716 selfA.StoredVariables["Analysis"].store( Xb )
2717 if selfA._toStore("APosterioriCovariance"):
2718 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2721 previousJMinimum = numpy.finfo(float).max
2723 for step in range(duration-1):
2724 if hasattr(Y,"store"):
2725 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2727 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2730 if hasattr(U,"store") and len(U)>1:
2731 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2732 elif hasattr(U,"store") and len(U)==1:
2733 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2735 Un = numpy.asmatrix(numpy.ravel( U )).T
2739 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2740 Xn = CovarianceInflation( Xn,
2741 selfA._parameters["InflationType"],
2742 selfA._parameters["InflationFactor"],
2745 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2746 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2748 returnSerieAsArrayMatrix = True )
2749 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2750 Xn_predicted = EMX + qi
2751 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
2752 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2753 Xn_predicted = Xn_predicted + Cm * Un
2754 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2755 # --- > By principle, M = Id, Q = 0
2758 #--------------------------
2759 if VariantM == "MLEF13":
2760 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2761 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2762 Ua = numpy.identity(__m)
2766 Ta = numpy.identity(__m)
2767 vw = numpy.zeros(__m)
2768 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2769 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2772 E1 = vx1 + _epsilon * EaX
2774 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2776 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2778 returnSerieAsArrayMatrix = True )
2779 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2782 EaY = (HE2 - vy2) / _epsilon
2784 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2786 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2787 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
2788 Deltaw = - numpy.linalg.solve(mH,GradJ)
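# Note added for reference: this inner loop is a Gauss-Newton iteration in
# the ensemble-weight space, solving mH @ Deltaw = -GradJ with the (m x m)
# approximate Hessian mH = I + EaY' RI EaY, and stops when the increment
# norm falls below _e or after _jmax iterations.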
2793 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2798 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2800 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2801 #--------------------------
2803 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2805 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2806 Xn = CovarianceInflation( Xn,
2807 selfA._parameters["InflationType"],
2808 selfA._parameters["InflationFactor"],
2811 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2812 #--------------------------
2814 if selfA._parameters["StoreInternalVariables"] \
2815 or selfA._toStore("CostFunctionJ") \
2816 or selfA._toStore("CostFunctionJb") \
2817 or selfA._toStore("CostFunctionJo") \
2818 or selfA._toStore("APosterioriCovariance") \
2819 or selfA._toStore("InnovationAtCurrentAnalysis") \
2820 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2821 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2822 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2823 _Innovation = Ynpu - _HXa
2825 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2826 # ---> with the analysis
2827 selfA.StoredVariables["Analysis"].store( Xa )
2828 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2829 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2830 if selfA._toStore("InnovationAtCurrentAnalysis"):
2831 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2832 # ---> with the current state
2833 if selfA._parameters["StoreInternalVariables"] \
2834 or selfA._toStore("CurrentState"):
2835 selfA.StoredVariables["CurrentState"].store( Xn )
2836 if selfA._toStore("ForecastState"):
2837 selfA.StoredVariables["ForecastState"].store( EMX )
2838 if selfA._toStore("BMA"):
2839 selfA.StoredVariables["BMA"].store( EMX - Xa )
2840 if selfA._toStore("InnovationAtCurrentState"):
2841 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2842 if selfA._toStore("SimulatedObservationAtCurrentState") \
2843 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2844 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2846 if selfA._parameters["StoreInternalVariables"] \
2847 or selfA._toStore("CostFunctionJ") \
2848 or selfA._toStore("CostFunctionJb") \
2849 or selfA._toStore("CostFunctionJo") \
2850 or selfA._toStore("CurrentOptimum") \
2851 or selfA._toStore("APosterioriCovariance"):
2852 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2853 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2855 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2856 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2857 selfA.StoredVariables["CostFunctionJ" ].store( J )
2859 if selfA._toStore("IndexOfOptimum") \
2860 or selfA._toStore("CurrentOptimum") \
2861 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2862 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2863 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2864 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2865 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2866 if selfA._toStore("IndexOfOptimum"):
2867 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2868 if selfA._toStore("CurrentOptimum"):
2869 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2870 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2871 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2872 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2873 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2874 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2875 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2876 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2877 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2878 if selfA._toStore("APosterioriCovariance"):
2879 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2880 if selfA._parameters["EstimationOf"] == "Parameters" \
2881 and J < previousJMinimum:
2882 previousJMinimum = J
2884 if selfA._toStore("APosterioriCovariance"):
2885 covarianceXaMin = Pn
2887 # Final additional storage of the optimum in parameter estimation
2888 # ----------------------------------------------------------------------
2889 if selfA._parameters["EstimationOf"] == "Parameters":
2890 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2891 selfA.StoredVariables["Analysis"].store( XaMin )
2892 if selfA._toStore("APosterioriCovariance"):
2893 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2894 if selfA._toStore("BMA"):
2895 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2899 # ==============================================================================
2900 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
2901 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
2905 if selfA._parameters["EstimationOf"] == "Parameters":
2906 selfA._parameters["StoreInternalVariables"] = True
2910 H = HO["Direct"].appliedControledFormTo
2912 if selfA._parameters["EstimationOf"] == "State":
2913 M = EM["Direct"].appliedControledFormTo
2915 if CM is not None and "Tangent" in CM and U is not None:
2916 Cm = CM["Tangent"].asMatrix(Xb)
2920 # Number of time steps identical to the number of observation steps
2921 # -------------------------------------------------------
2922 if hasattr(Y,"stepnumber"):
2923 duration = Y.stepnumber()
2924 __p = numpy.cumprod(Y.shape())[-1]
2927 __p = numpy.array(Y).size
2929 # Precomputation of the inverses of B and R
2930 # ----------------------------------
2931 if selfA._parameters["StoreInternalVariables"] \
2932 or selfA._toStore("CostFunctionJ") \
2933 or selfA._toStore("CostFunctionJb") \
2934 or selfA._toStore("CostFunctionJo") \
2935 or selfA._toStore("CurrentOptimum") \
2936 or selfA._toStore("APosterioriCovariance"):
2943 __m = selfA._parameters["NumberOfMembers"]
2944 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2946 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2948 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    #
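    # Time loop: one analysis cycle per observation step, propagating the
    # ensemble from step to step+1 (Ynpu is the observation at step+1)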
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta = numpy.identity(__m)
            vw = numpy.zeros(__m)
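            # Gauss-Newton loop in the ensemble subspace: the state is
            # parametrized as x(w) = Xfm + EaX @ w, and w minimizes
            #   J(w) = 0.5 * w'w + 0.5 * (y - H(x(w)))' R^{-1} (y - H(x(w))),
            # using the gradient GradJ and the approximate Hessian mH built
            # below from the observation-space anomalies EaY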
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # ---> By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                vy1 = H((vx2, Un)).reshape((__p,1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
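                #
                # Update of the coefficients and of the transform matrix:
                # Ta = mH^{-1/2} rescales the anomalies so that the ensemble
                # spread stays consistent with the inverse of the approximate
                # Hessian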
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
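            #
            # After convergence, the analysis ensemble is rebuilt around vx2
            # from the anomalies of the last forecast ensemble; in the bundle
            # form (BnotT), the epsilon-scaled anomalies are rescaled back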
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( E2 )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( E2 - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
            J = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum = J
            XaMin = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum, in parameter estimation
    # ----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
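    #
    # Minimal illustrative sketch (added here, not part of the original
    # autodiagnostic), assuming toy random data: it only exercises the
    # ensemble transform used above in ienkf, Ta = (I + EaY' RI EaY)^{-1/2},
    # and checks that Ta symmetrizes the approximate Hessian back to identity.
    __m, __p = 5, 3
    rng = numpy.random.RandomState(1234)
    EaY = rng.standard_normal((__p,__m))       # toy observation-space anomalies
    RI  = numpy.identity(__p)                  # assumes R = I for the sketch
    mH  = numpy.identity(__m) + EaY.T @ (RI @ EaY)
    Ta  = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
    print("Transform check (Ta mH Ta ~ I):", numpy.allclose(Ta @ mH @ Ta, numpy.identity(__m)))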