# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2021 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the generic numerical objects.
"""
__author__ = "Jean-Philippe ARGAUD"

import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
from daCore.BasicObjects import Operator
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
# logging.getLogger().setLevel(logging.DEBUG)
# ==============================================================================
def ExecuteFunction( triplet ):
    assert len(triplet) == 3, "Incorrect number of arguments"
    X, xArgs, funcrepr = triplet
    __X = numpy.asmatrix(numpy.ravel( X )).T
    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
    __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
    __fonction = getattr(__module,funcrepr["__userFunction__name"])
    sys.path = __sys_path_tmp ; del __sys_path_tmp
    if isinstance(xArgs, dict):
        __HX = __fonction( __X, **xArgs )
    else:
        __HX = __fonction( __X )
    return numpy.ravel( __HX )
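
# Illustrative sketch of the "triplet" layout that ExecuteFunction expects,
# assuming a hypothetical user module "/path/to/mymodule.py" defining a
# callable "myFunction" (all names here are placeholders, not library API):
#
#     funcrepr = {
#         "__userFunction__path" : "/path/to",    # directory of the module
#         "__userFunction__modl" : "mymodule",    # module name, no extension
#         "__userFunction__name" : "myFunction",  # callable to evaluate
#         }
#     HX = ExecuteFunction( (X, None, funcrepr) )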
# ==============================================================================
class FDApproximation(object):
    """
    This class serves as an interface for defining approximated operators.
    When creating an object and providing a function "Function", one obtains
    an object with 3 methods: "DirectOperator", "TangentOperator" and
    "AdjointOperator". The FD approximation is controlled either through the
    multiplicative increment "increment", 1% by default, or through the fixed
    increment "dX" which is multiplied by "increment" (hence in %), and
    centered finite differences are used if the boolean "centeredDF" is true.
    """
    def __init__(self,
            name     = "FDApproximation",
            Function = None,
            centeredDF = False,
            increment  = 0.01,
            dX         = None,
            extraArguments = None,
            avoidingRedundancy    = True,
            toleranceInRedundancy = 1.e-18,
            lenghtOfRedundancy    = -1,
            mpEnabled  = False,
            mpWorkers  = None,
            mfEnabled  = False,
            ):
        self.__name = str(name)
        self.__extraArgs = extraArguments
        if mpEnabled:
            try:
                import multiprocessing
                self.__mpEnabled = True
            except ImportError:
                self.__mpEnabled = False
        else:
            self.__mpEnabled = False
        self.__mpWorkers = mpWorkers
        if self.__mpWorkers is not None and self.__mpWorkers < 1:
            self.__mpWorkers = None
        logging.debug("FDA Multiprocessing calculations: %s (number of processes: %s)"%(self.__mpEnabled,self.__mpWorkers))
        #
        if mfEnabled:
            self.__mfEnabled = True
        else:
            self.__mfEnabled = False
        logging.debug("FDA Multifunction calculations: %s"%(self.__mfEnabled,))
        #
        if avoidingRedundancy:
            self.__avoidRC = True
            self.__tolerBP = float(toleranceInRedundancy)
            self.__lenghtRJ = int(lenghtOfRedundancy)
            self.__listJPCP = [] # Jacobian Previous Calculated Points
            self.__listJPCI = [] # Jacobian Previous Calculated Increments
            self.__listJPCR = [] # Jacobian Previous Calculated Results
            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
        else:
            self.__avoidRC = False
        #
        if self.__mpEnabled:
            if isinstance(Function,types.FunctionType):
                logging.debug("FDA Multiprocessing calculations: FunctionType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct computation
            elif isinstance(Function,types.MethodType):
                logging.debug("FDA Multiprocessing calculations: MethodType")
                self.__userFunction__name = Function.__name__
                try:
                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
                except Exception:
                    mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                if not os.path.isfile(mod):
                    raise ImportError("No user defined function or method found with the name %s"%(mod,))
                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                self.__userFunction__path = os.path.dirname(mod)
                del mod
                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                self.__userFunction = self.__userOperator.appliedTo # For the direct computation
            else:
                raise TypeError("User defined function or method has to be provided for finite differences approximation.")
        else:
            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
            self.__userFunction = self.__userOperator.appliedTo
        #
        self.__centeredDF = bool(centeredDF)
        if abs(float(increment)) > 1.e-15:
            self.__increment  = float(increment)
        else:
            self.__increment  = 0.01
        if dX is None:
            self.__dX     = None
        else:
            self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
        logging.debug("FDA Reduction of calculation duplicates : %s"%self.__avoidRC)
        if self.__avoidRC:
            logging.debug("FDA Tolerance for duplicate detection : %.2e"%self.__tolerBP)
    # ---------------------------------------------------------
    def __doublon__(self, e, l, n, v=None):
        __ac, __iac = False, -1
        for i in range(len(l)-1,-1,-1):
            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
                __ac, __iac = True, i
                if v is not None: logging.debug("FDA Case%s already calculated, retrieving duplicate %i"%(v,__iac))
                break
        return __ac, __iac
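
    # NB: __doublon__ scans the stored points backwards, so it reports the
    # most recent entry of "l" whose distance to "e" is below the relative
    # tolerance self.__tolerBP scaled by the stored norms "n".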
    # ---------------------------------------------------------
    def DirectOperator(self, X, **extraArgs ):
        """
        Computation of the direct operator using the supplied function.

        NB: the extraArgs are present to ensure calling compatibility, but
        they must not be passed here to the user function.
        """
        logging.debug("FDA Computation of DirectOperator (explicit)")
        if self.__mfEnabled:
            _HX = self.__userFunction( X, argsAsSerie = True )
        else:
            _X = numpy.asmatrix(numpy.ravel( X )).T
            _HX = numpy.ravel(self.__userFunction( _X ))
        #
        return _HX
    # ---------------------------------------------------------
    def TangentMatrix(self, X ):
        """
        Computation of the tangent operator as the Jacobian by finite
        differences, i.e. the gradient of H at X. Directional finite
        differences are used around the point X. X is a numpy.matrix.

        Centered finite differences (2nd-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to and
           subtracted from the component X[i], to build X_plus_dXi and
           X_moins_dXi, and the responses HX_plus_dXi = H( X_plus_dXi ) and
           HX_moins_dXi = H( X_moins_dXi ) are computed
        2/ The difference (HX_plus_dXi-HX_moins_dXi) is formed and divided by
           (2*dXi)
        3/ Each result, component by component, becomes a column of the
           Jacobian

        Non-centered finite differences (1st-order approximation):
        1/ For each component i of X, the perturbation dX[i] is added to the
           component X[i] to build X_plus_dXi, and the response
           HX_plus_dXi = H( X_plus_dXi ) is computed
        2/ The central value HX = H(X) is computed
        3/ The difference (HX_plus_dXi-HX) is formed and divided by (dXi)
        4/ Each result, component by component, becomes a column of the
           Jacobian
        """
        logging.debug("FDA Start of the Jacobian computation")
        logging.debug("FDA Increment of............: %s*X"%float(self.__increment))
        logging.debug("FDA Centered approximation..: %s"%(self.__centeredDF))
        #
        if X is None or len(X)==0:
            raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
        #
        _X = numpy.asmatrix(numpy.ravel( X )).T
        #
        if self.__dX is None:
            _dX  = self.__increment * _X
        else:
            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
        #
        if (_dX == 0.).any():
            moyenne = _dX.mean()
            if moyenne == 0.:
                _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
            else:
                _dX = numpy.where( _dX == 0., moyenne, _dX )
        #
        __alreadyCalculated  = False
        if self.__avoidRC:
            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
            __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
            if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                __alreadyCalculated, __i = True, __alreadyCalculatedP
                logging.debug("FDA Case J already calculated, retrieving duplicate %i"%__i)
        #
        if __alreadyCalculated:
            logging.debug("FDA   Computing the Jacobian (by retrieving duplicate %i)"%__i)
            _Jacobienne = self.__listJPCR[__i]
        else:
            logging.debug("FDA   Computing the Jacobian (explicit)")
            if self.__centeredDF:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    for i in range( len(_dX) ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _xserie.append( _X_plus_dXi )
                        _xserie.append( _X_moins_dXi )
                    #
                    _HX_plusmoins_dX = self.DirectOperator( _xserie )
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_moins_dXi[i] = _X[i] - _dXi
                        #
                        _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                        _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                        #
                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
                #
            else:
                #
                if self.__mpEnabled and not self.__mfEnabled:
                    funcrepr = {
                        "__userFunction__path" : self.__userFunction__path,
                        "__userFunction__modl" : self.__userFunction__modl,
                        "__userFunction__name" : self.__userFunction__name,
                        }
                    _jobs = []
                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                    #
                    import multiprocessing
                    self.__pool = multiprocessing.Pool(self.__mpWorkers)
                    _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
                    self.__pool.close()
                    self.__pool.join()
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                elif self.__mfEnabled:
                    _xserie = []
                    _xserie.append( _X.A1 )
                    for i in range( len(_dX) ):
                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i] = _X[i] + _dX[i]
                        #
                        _xserie.append( _X_plus_dXi )
                    #
                    _HX_plus_dX = self.DirectOperator( _xserie )
                    #
                    _HX = _HX_plus_dX.pop(0)
                    #
                    _Jacobienne = []
                    for i in range( len(_dX) ):
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
                    #
                else:
                    _Jacobienne = []
                    _HX = self.DirectOperator( _X )
                    for i in range( _dX.size ):
                        _dXi            = _dX[i]
                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
                        _X_plus_dXi[i]  = _X[i] + _dXi
                        #
                        _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                        #
                        _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
            #
            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
            if self.__avoidRC:
                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
                while len(self.__listJPCP) > self.__lenghtRJ:
                    self.__listJPCP.pop(0)
                    self.__listJPCI.pop(0)
                    self.__listJPCR.pop(0)
                    self.__listJPPN.pop(0)
                    self.__listJPIN.pop(0)
                self.__listJPCP.append( copy.copy(_X) )
                self.__listJPCI.append( copy.copy(_dX) )
                self.__listJPCR.append( copy.copy(_Jacobienne) )
                self.__listJPPN.append( numpy.linalg.norm(_X) )
                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
        #
        logging.debug("FDA End of the Jacobian computation")
        #
        return _Jacobienne
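
    # A minimal standalone sketch of the centered-difference formula used
    # above (pure NumPy, independent of this class; "func" and the arrays
    # are hypothetical):
    #
    #     def centered_jacobian(func, x, dx):
    #         cols = []
    #         for i in range(x.size):
    #             xp, xm = x.copy(), x.copy()
    #             xp[i] += dx[i] ; xm[i] -= dx[i]
    #             cols.append( (func(xp) - func(xm)) / (2. * dx[i]) )
    #         return numpy.vstack(cols).T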
    # ---------------------------------------------------------
    def TangentOperator(self, paire, **extraArgs ):
        """
        Computation of the tangent operator using the Jacobian.

        NB: the extraArgs are present to ensure calling compatibility, but
        they must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, dX = _paire
        _Jacobienne = self.TangentMatrix( X )
        if dX is None or len(dX) == 0:
            #
            # Return of the matrix form if the second argument is None
            # ---------------------------------------------------------
            if self.__mfEnabled: return [_Jacobienne,]
            else:                return _Jacobienne
        else:
            #
            # Computation of the linearized value of H at X applied to dX
            # ------------------------------------------------------------
            _dX = numpy.asmatrix(numpy.ravel( dX )).T
            _HtX = numpy.dot(_Jacobienne, _dX)
            if self.__mfEnabled: return [_HtX.A1,]
            else:                return _HtX.A1
    # ---------------------------------------------------------
    def AdjointOperator(self, paire, **extraArgs ):
        """
        Computation of the adjoint operator using the Jacobian.

        NB: the extraArgs are present to ensure calling compatibility, but
        they must not be passed here to the user function.
        """
        if self.__mfEnabled:
            assert len(paire) == 1, "Incorrect length of arguments"
            _paire = paire[0]
            assert len(_paire) == 2, "Incorrect number of arguments"
        else:
            assert len(paire) == 2, "Incorrect number of arguments"
            _paire = paire
        X, Y = _paire
        _JacobienneT = self.TangentMatrix( X ).T
        if Y is None or len(Y) == 0:
            #
            # Return of the matrix form if the second argument is None
            # ---------------------------------------------------------
            if self.__mfEnabled: return [_JacobienneT,]
            else:                return _JacobienneT
        else:
            #
            # Computation of the adjoint value at X applied to Y
            # ----------------------------------------------------
            _Y = numpy.asmatrix(numpy.ravel( Y )).T
            _HaY = numpy.dot(_JacobienneT, _Y)
            if self.__mfEnabled: return [_HaY.A1,]
            else:                return _HaY.A1
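
# Illustrative usage sketch for FDApproximation (the user function and the
# variable names below are hypothetical, not part of the library API):
#
#     def H(x): return numpy.ravel(x)**2
#     FDA = FDApproximation( Function = H, increment = 0.01 )
#     X0  = numpy.ones(3)
#     HX  = FDA.DirectOperator( X0 )           # H(X0)
#     TX  = FDA.TangentOperator( (X0, None) )  # full Jacobian at X0
#     AY  = FDA.AdjointOperator( (X0, HX) )    # Jacobian^T applied to HX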
# ==============================================================================
def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
    "Generation of an ensemble of _nbmembers centered random states"
    #
    _bgcenter = numpy.ravel(_bgcenter)[:,None]
    if abs(_nbmembers) < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    #
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
    #
    return BackgroundEnsemble
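
# Illustrative sketch (hypothetical values): drawing a 5-member ensemble
# centered on xb with covariance B:
#
#     xb = numpy.zeros(3) ; B = numpy.identity(3)
#     E  = EnsembleOfCenteredPerturbations( xb, B, 5 )   # E.shape == (3, 5)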
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
    "Generation of an ensemble of _nbmembers centered random states (built from _nbmembers-1 independent anomalies)"
    def __CenteredRandomAnomalies(Zr, N):
        """
        Generates a matrix of N random anomalies centered on Zr, following
        the handwritten notes of MB and consistent with the code of PS with
        eps = -1
        """
        eps = -1
        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
        Q = numpy.dot(Q,R)
        Zr = numpy.dot(Q,Zr)
        return Zr.T
    #
    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
    if abs(_nbmembers) < 1:
        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
    if _bgcovariance is None:
        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    else:
        if _withSVD:
            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
            _nbctl = _bgcenter.size
            if _nbmembers > _nbctl:
                _Z = numpy.concatenate((numpy.dot(
                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
            else:
                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
            BackgroundEnsemble = _bgcenter + _Zca
        else:
            if max(abs(_bgcovariance.flatten())) > 0:
                _nbctl = _bgcenter.size
                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
                BackgroundEnsemble = _bgcenter + _Zca
            else:
                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
    #
    return BackgroundEnsemble
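
# Illustrative check (hypothetical values): the anomalies generated above
# are centered, so the ensemble mean matches _bgcenter up to round-off:
#
#     E = EnsembleOfBackgroundPerturbations( numpy.zeros(3), numpy.identity(3), 5 )
#     numpy.allclose( E.mean(axis=1), 0., atol=1.e-9 )   # expected True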
# ==============================================================================
def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
    "Returns the centered anomalies of a StateSize*NbOfMembers ensemble"
    if OptMean is None:
        __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
    else:
        __Em = numpy.ravel(OptMean).reshape((-1,1))
    #
    return Normalisation * (numpy.asarray(Ensemble) - __Em)
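
# Illustrative sketch (hypothetical values):
#
#     E  = numpy.array([[1., 3.], [2., 4.]])
#     Ea = EnsembleOfAnomalies( E )   # [[-1., 1.], [-1., 1.]]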
# ==============================================================================
def EnsembleErrorCovariance( Ensemble ):
    "Returns the ensemble error covariance"
    __Anomalies = EnsembleOfAnomalies( Ensemble )
    __n, __m = numpy.asarray(__Anomalies).shape
    # Empirical estimation
    __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
    # Enforce symmetry
    __Covariance = (__Covariance + __Covariance.T) * 0.5
    # Enforce positivity
    __epsilon    = mpr*numpy.trace(__Covariance)
    __Covariance = __Covariance + __epsilon * numpy.identity(__n)
    #
    return __Covariance
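
# Illustrative sketch (hypothetical values): for an ensemble of shape (n, m)
# the result approximates the sample covariance with the unbiased 1/(m-1)
# normalisation:
#
#     E = numpy.random.multivariate_normal(numpy.zeros(2), numpy.identity(2), 1000).T
#     P = EnsembleErrorCovariance( E )   # close to identity for large m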
# ==============================================================================
def CovarianceInflation(
        InputCovOrEns,
        InflationType   = None,
        InflationFactor = None,
        BackgroundCov   = None,
        ):
    """
    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

    Synthesis: Hunt 2007, section 2.3.5
    """
    if InflationFactor is None:
        return InputCovOrEns
    else:
        InflationFactor = float(InflationFactor)
    #
    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
    #
    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
        if InflationFactor < 1.:
            raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
        if InflationFactor < 1.+mpr:
            return InputCovOrEns
        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
    #
    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
    #
    elif InflationType == "HybridOnBackgroundCovariance":
        if InflationFactor < 0.:
            raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
        if InflationFactor < mpr:
            return InputCovOrEns
        __n, __m = numpy.asarray(InputCovOrEns).shape
        if __n != __m:
            raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
        if BackgroundCov is None:
            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
        if InputCovOrEns.shape != BackgroundCov.shape:
            raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
    #
    elif InflationType == "Relaxation":
        raise NotImplementedError("InflationType Relaxation")
    #
    else:
        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
    #
    return OutputCovOrEns
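
# Illustrative usage sketch (hypothetical values):
#
#     Pa  = numpy.identity(3)
#     Pa1 = CovarianceInflation( Pa, "MultiplicativeOnAnalysisCovariance", 1.02 )
#     Pa2 = CovarianceInflation( Pa, "AdditiveOnAnalysisCovariance", 0.01 )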
# ==============================================================================
def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
    """
    EnKS
    """
    #
    # Operators
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Precompute the inversions of B and R
    RIdemi = R.sqrtmI()
    #
    # Observation duration and sizes
    LagL = selfA._parameters["SmootherLagL"]
    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
        raise ValueError("Fixed-lag smoother requires a series of observations")
    if Y.stepnumber() < LagL:
        raise ValueError("Fixed-lag smoother requires a series of observations greater than the lag L")
    duration = Y.stepnumber()
    __p = numpy.cumprod(Y.shape())[-1]
    __n = Xb.size
    __m = selfA._parameters["NumberOfMembers"]
    #
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
    #
    # Initial direct computation (memorization is preferred over recomputation)
    __seed = numpy.random.get_state()
    selfB = copy.deepcopy(selfA)
    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
    if VariantM == "EnKS16-KalmanFilterFormula":
        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
    else:
        raise ValueError("VariantM has to be chosen in the authorized methods list.")
    if LagL > 0:
        EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
    else:
        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
    #
    for step in range(LagL,duration-1):
        #
        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
        sEL.append(None)
        #
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        #--------------------------
        if VariantM == "EnKS16-KalmanFilterFormula":
            if selfA._parameters["EstimationOf"] == "State": # Forecast
                EL = M( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                    EZ = EZ + Cm @ Un
            elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By principle, M = Id, Q = 0
                EZ = H( [(EL[:,i], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
            #
            vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
            #
            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
            delta = RIdemi @ ( Ynpu - vZm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
            #
            EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
            EL = vEm + EX @ wTU
            sEL[LagL] = EL
            #
            for irl in range(LagL): # Smoothing of the L previous analyses
                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
                sEL[irl] = vEm + EX @ wTU
            #
            # Preservation of the order-0 retrospective analysis before rotation
            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
            if selfA._toStore("APosterioriCovariance"):
                EXn = sEL[0]
            #
            for irl in range(LagL):
                sEL[irl] = sEL[irl+1]
            sEL[LagL] = None
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
    #
    # Storage of the last analyses that were only partially updated
    for irl in range(LagL):
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        selfA.StoredVariables["Analysis"].store( Xa )
    #
    return 0
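
# Minimal sketch of the weight reuse that makes the fixed-lag smoother work
# (hypothetical standalone arrays: the filter weights wTU computed at the
# current step are applied to each of the L stored past ensembles):
#
#     for irl in range(LagL):
#         vEm      = sEL[irl].mean(axis=1).reshape((-1,1))
#         EX       = (sEL[irl] - vEm) / math.sqrt(__m-1)
#         sEL[irl] = vEm + EX @ wTU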
# ==============================================================================
def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
    """
    Ensemble-Transform EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    # ------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
        RI = R.getI()
    elif VariantM != "KalmanFilterFormula":
        RI = R.getI()
    if VariantM == "KalmanFilterFormula":
        RIdemi = R.sqrtmI()
    #
    __n = Xb.size
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
    #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
            Xn_predicted = EMX + qi
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
                Xn_predicted = Xn_predicted + Cm * Un
        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
            # --- > By principle, M = Id, Q = 0
            Xn_predicted = Xn
            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
                argsAsSerie = True,
                returnSerieAsArrayMatrix = True )
        #
        # Mean of forecast and observation of forecast
        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
        #
        # Anomalies
        EaX  = EnsembleOfAnomalies( Xn_predicted, Xfm )
        EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
        #
        #--------------------------
        if VariantM == "KalmanFilterFormula":
            mS    = RIdemi * EaHX / math.sqrt(__m-1)
            delta = RIdemi * ( Ynpu - Hfm )
            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
            vw    = mT @ mS.T @ delta
            #
            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
            mU    = numpy.identity(__m)
            #
            EaX   = EaX / math.sqrt(__m-1)
            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
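            # NB: this is the deterministic square-root (ensemble-transform)
            # update: the mean is shifted by the weights vw and the forecast
            # anomalies are rotated/rescaled by sqrt(m-1) * T^{1/2}.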
        #--------------------------
        elif VariantM == "Variational":
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m-1) * w.T @ w
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m-1) * w.reshape((__m,1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = (__m-1) * numpy.identity(__m)
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
        #--------------------------
        elif VariantM == "FiniteSize11": # Boc2011 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = __m * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize15": # Boc2015 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = (__m+1) * \
                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
                / (1 + 1/__m + vw.T @ vw)**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
        #--------------------------
        elif VariantM == "FiniteSize16": # Boc2016 gauge
            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
            def CostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _Jo = 0.5 * _A.T @ (RI * _A)
                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
                _J  = _Jo + _Jb
                return float(_J)
            def GradientOfCostFunction(w):
                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
                _GradJo = - EaHX.T @ (RI * _A)
                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
                _GradJ  = _GradJo + _GradJb
                return numpy.ravel(_GradJ)
            vw = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(__m),
                fprime      = GradientOfCostFunction,
                args        = (),
                disp        = False,
                )
            #
            Hto = EaHX.T @ (RI * EaHX)
            Htb = ((__m+1) / (__m-1)) * \
                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
            Hta = Hto + Htb
            #
            Pta = numpy.linalg.inv( Hta )
            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
            #
            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
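            # NB: the three "FiniteSize" variants above differ from the plain
            # variational form only through the background term _Jb, which uses
            # a finite-size (EnKF-N style) log prior instead of the quadratic
            # (m-1) weight penalty; the Boc20xx tags refer to the gauges cited
            # in the comments.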
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( EMX )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
        # ---> For the smoothers
        if selfA._toStore("CurrentEnsembleState"):
            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
    """
    Iterative EnKF
    """
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA._parameters["StoreInternalVariables"] = True
    #
    # Operators
    # ---------
    H = HO["Direct"].appliedControledFormTo
    #
    if selfA._parameters["EstimationOf"] == "State":
        M = EM["Direct"].appliedControledFormTo
    #
    if CM is not None and "Tangent" in CM and U is not None:
        Cm = CM["Tangent"].asMatrix(Xb)
    else:
        Cm = None
    #
    # Number of steps equal to the number of observation steps
    # ---------------------------------------------------------
    if hasattr(Y,"stepnumber"):
        duration = Y.stepnumber()
        __p = numpy.cumprod(Y.shape())[-1]
    else:
        duration = 2
        __p = numpy.array(Y).size
    #
    # Precompute the inversions of B and R
    # ------------------------------------
    if selfA._parameters["StoreInternalVariables"] \
        or selfA._toStore("CostFunctionJ") \
        or selfA._toStore("CostFunctionJb") \
        or selfA._toStore("CostFunctionJo") \
        or selfA._toStore("CurrentOptimum") \
        or selfA._toStore("APosterioriCovariance"):
        BI = B.getI()
    RI = R.getI()
    #
    __n = Xb.size
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    __m = selfA._parameters["NumberOfMembers"]
    if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
    else:                         Pn = B
    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
    else:                         Rn = R
    if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
    else:                         Qn = Q
    Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
    #
    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
        selfA.StoredVariables["Analysis"].store( Xb )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
            covarianceXa = Pn
    #
    previousJMinimum = numpy.finfo(float).max
    #
    for step in range(duration-1):
        if hasattr(Y,"store"):
            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
        else:
            Ynpu = numpy.ravel( Y ).reshape((__p,1))
        #
        if U is not None:
            if hasattr(U,"store") and len(U)>1:
                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
            elif hasattr(U,"store") and len(U)==1:
                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
            else:
                Un = numpy.asmatrix(numpy.ravel( U )).T
        else:
            Un = None
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        #--------------------------
        if VariantM == "IEnKF12":
            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
            __j = 0
            Deltaw = 1
            if not BnotT:
                Ta  = numpy.identity(__m)
            vw  = numpy.zeros(__m)
            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
                #
                if BnotT:
                    E1 = vx1 + _epsilon * EaX
                else:
                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
                #
                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
                        argsAsSerie = True,
                        returnSerieAsArrayMatrix = True )
                elif selfA._parameters["EstimationOf"] == "Parameters":
                    # --- > By principle, M = Id
                    E2 = Xn
                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
                vy1 = H((vx2, Un)).reshape((__p,1))
                #
                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
                    argsAsSerie = True,
                    returnSerieAsArrayMatrix = True )
                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
                #
                if BnotT:
                    EaY = (HE2 - vy2) / _epsilon
                else:
                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
                #
                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
                Deltaw = - numpy.linalg.solve(mH,GradJ)
                #
                vw = vw + Deltaw
                #
                if not BnotT:
                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                #
                __j = __j + 1
            #
            A2 = EnsembleOfAnomalies( E2 )
            #
            if BnotT:
                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
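                # NB: with BnotT the "bundle" variant is used, so the analysis
                # anomalies built with the small parameter _epsilon are rescaled
                # back by sqrt(m-1)*Ta/_epsilon; otherwise the "transform"
                # variant already carries the Ta rotation inside E1.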
            #
            Xn = vx2 + A2
        #--------------------------
        else:
            raise ValueError("VariantM has to be chosen in the authorized methods list.")
        #
        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
            Xn = CovarianceInflation( Xn,
                selfA._parameters["InflationType"],
                selfA._parameters["InflationFactor"],
                )
        #
        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
        #--------------------------
        #
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("APosterioriCovariance") \
            or selfA._toStore("InnovationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
            _Innovation = Ynpu - _HXa
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        # ---> with analysis
        selfA.StoredVariables["Analysis"].store( Xa )
        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
        if selfA._toStore("InnovationAtCurrentAnalysis"):
            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
        # ---> with current state
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( Xn )
        if selfA._toStore("ForecastState"):
            selfA.StoredVariables["ForecastState"].store( E2 )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( E2 - Xa )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
        if selfA._toStore("SimulatedObservationAtCurrentState") \
            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
        # ---> others
        if selfA._parameters["StoreInternalVariables"] \
            or selfA._toStore("CostFunctionJ") \
            or selfA._toStore("CostFunctionJb") \
            or selfA._toStore("CostFunctionJo") \
            or selfA._toStore("CurrentOptimum") \
            or selfA._toStore("APosterioriCovariance"):
            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
            J   = Jb + Jo
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            #
            if selfA._toStore("IndexOfOptimum") \
                or selfA._toStore("CurrentOptimum") \
                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
        if selfA._parameters["EstimationOf"] == "Parameters" \
            and J < previousJMinimum:
            previousJMinimum    = J
            XaMin               = Xa
            if selfA._toStore("APosterioriCovariance"):
                covarianceXaMin = Pn
    #
    # Additional final storage of the optimum for parameter estimation
    # -----------------------------------------------------------------
    if selfA._parameters["EstimationOf"] == "Parameters":
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
        selfA.StoredVariables["Analysis"].store( XaMin )
        if selfA._toStore("APosterioriCovariance"):
            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
        if selfA._toStore("BMA"):
            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
    #
    return 0
# ==============================================================================
def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    Incremental 3DVAR
    """
    #
    # Initializations
    # ---------------
    #
    # Non-linear operator for the outer loop
    Hm = HO["Direct"].appliedTo
    #
    # Precompute the inversions of B and R
    BI = B.getI()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = selfA._parameters["InitializationPoint"]
    #
    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
    Innovation = Y - HXb
    #
    # Outer loop
    # ----------
    iOuter = 0
    J      = 1./mpr
    DeltaJ = 1./mpr
    Xr     = Xini.reshape((-1,1))
    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
        #
        # Inner loop: tangent operator around the current point Xr
        Ht = HO["Tangent"].asMatrix(Xr)
        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
        #
        # Definition of the cost function
        # -------------------------------
        def CostFunction(dx):
            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
            if selfA._parameters["StoreInternalVariables"] or \
                selfA._toStore("CurrentState") or \
                selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
            _HdX = Ht * _dX
            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            if selfA._toStore("SimulatedObservationAtCurrentState") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
            if selfA._toStore("InnovationAtCurrentState"):
                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
            #
            Jb  = float( 0.5 * _dX.T * BI * _dX )
            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
            J   = Jb + Jo
            #
            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
            selfA.StoredVariables["CostFunctionJb"].store( Jb )
            selfA.StoredVariables["CostFunctionJo"].store( Jo )
            selfA.StoredVariables["CostFunctionJ" ].store( J )
            if selfA._toStore("IndexOfOptimum") or \
                selfA._toStore("CurrentOptimum") or \
                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
            return J
        #
        def GradientOfCostFunction(dx):
            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
            _HdX         = Ht * _dX
            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
            _dInnovation = Innovation - _HdX
            GradJb       = BI * _dX
            GradJo       = - Ht.T @ (RI * _dInnovation)
            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
            return GradJ
        #
        # Minimization of the cost function
        # ---------------------------------
        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
        if selfA._parameters["Minimizer"] == "LBFGSB":
            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
            if "0.19" <= scipy.version.version <= "1.1.0":
                import lbfgsbhlt as optimiseur
            else:
                import scipy.optimize as optimiseur
            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                iprint      = selfA._parameters["optiprint"],
                )
            nfeval = Informations['funcalls']
            rc     = Informations['warnflag']
        elif selfA._parameters["Minimizer"] == "TNC":
            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                func        = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                bounds      = selfA._parameters["Bounds"],
                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
                ftol        = selfA._parameters["CostDecrementTolerance"],
                messages    = selfA._parameters["optmessages"],
                )
        elif selfA._parameters["Minimizer"] == "CG":
            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "NCG":
            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                avextol     = selfA._parameters["CostDecrementTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        elif selfA._parameters["Minimizer"] == "BFGS":
            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                f           = CostFunction,
                x0          = numpy.zeros(Xini.size),
                fprime      = GradientOfCostFunction,
                args        = (),
                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
                gtol        = selfA._parameters["GradientNormTolerance"],
                disp        = selfA._parameters["optdisp"],
                full_output = True,
                )
        else:
            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1490 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1491 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1493 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1494 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1495 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1497 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1500 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1501 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
1503 # Obtention de l'analyse
1504 # ----------------------
1507 selfA.StoredVariables["Analysis"].store( Xa )
1509 if selfA._toStore("OMA") or \
1510 selfA._toStore("SigmaObs2") or \
1511 selfA._toStore("SimulationQuantiles") or \
1512 selfA._toStore("SimulatedObservationAtOptimum"):
1513 if selfA._toStore("SimulatedObservationAtCurrentState"):
1514 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1515 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1516 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
1520 # Calcul de la covariance d'analyse
1521 # ---------------------------------
1522 if selfA._toStore("APosterioriCovariance") or \
1523 selfA._toStore("SimulationQuantiles") or \
1524 selfA._toStore("JacobianMatrixAtOptimum") or \
1525 selfA._toStore("KalmanGainAtOptimum"):
1526 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1527 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1528 if selfA._toStore("APosterioriCovariance") or \
1529 selfA._toStore("SimulationQuantiles") or \
1530 selfA._toStore("KalmanGainAtOptimum"):
1531 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1532 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1533 if selfA._toStore("APosterioriCovariance") or \
1534 selfA._toStore("SimulationQuantiles"):
1538 _ee = numpy.matrix(numpy.zeros(nb)).T
1540 _HtEE = numpy.dot(HtM,_ee)
1541 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1542 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1543 HessienneI = numpy.matrix( HessienneI )
1545 if min(A.shape) != max(A.shape):
1546 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1547 if (numpy.diag(A) < 0).any():
1548 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
1549 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
1550 try:
1551 L = numpy.linalg.cholesky( A )
1552 except:
1553 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1554 if selfA._toStore("APosterioriCovariance"):
1555 selfA.StoredVariables["APosterioriCovariance"].store( A )
1556 if selfA._toStore("JacobianMatrixAtOptimum"):
1557 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1558 if selfA._toStore("KalmanGainAtOptimum"):
1559 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1560 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1561 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1563 # Additional calculations and/or storage
1564 # ---------------------------------------
1565 if selfA._toStore("Innovation") or \
1566 selfA._toStore("SigmaObs2") or \
1567 selfA._toStore("MahalanobisConsistency") or \
1568 selfA._toStore("OMB"):
1569 d = Y - HXb
1570 if selfA._toStore("Innovation"):
1571 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1572 if selfA._toStore("BMA"):
1573 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1574 if selfA._toStore("OMA"):
1575 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1576 if selfA._toStore("OMB"):
1577 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1578 if selfA._toStore("SigmaObs2"):
1579 TraceR = R.trace(Y.size)
1580 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1581 if selfA._toStore("MahalanobisConsistency"):
1582 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
1583 if selfA._toStore("SimulationQuantiles"):
1584 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1585 HXa = numpy.matrix(numpy.ravel( HXa )).T
1586 EXr = None
1587 YfQ = None
1588 for i in range(nech):
1589 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1590 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1591 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1592 Yr = HXa + dYr
1593 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
1594 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1595 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1596 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1597 if YfQ is None:
1598 YfQ = Yr
1599 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
1600 else:
1601 YfQ = numpy.hstack((YfQ,Yr))
1602 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
1603 YfQ.sort(axis=-1)
1604 YQ = None
1605 for quantile in selfA._parameters["Quantiles"]:
1606 if not (0. <= float(quantile) <= 1.): continue
1607 indice = int(nech * float(quantile) - 1./nech)
1608 if YQ is None: YQ = YfQ[:,indice]
1609 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1610 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1611 if selfA._toStore("SampledStateForQuantiles"):
1612 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
1613 if selfA._toStore("SimulatedObservationAtBackground"):
1614 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1615 if selfA._toStore("SimulatedObservationAtOptimum"):
1616 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1617 #
1618 return 0
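# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: on a toy
# linear problem, the a posteriori covariance A = (B^-1 + H^T R^-1 H)^-1 built
# column by column as in the block above is checked by a Cholesky factorization,
# and the two gain formulas used for "KalmanGainAtOptimum" are verified to
# coincide. All names below are local assumptions of the example.
def _exampleAPosterioriCovarianceAndGain():
    import numpy
    numpy.random.seed(1)
    n, p = 4, 3                                  # state and observation sizes
    H  = numpy.random.rand(p, n)                 # toy linear observation operator
    B  = numpy.eye(n)                            # background error covariance
    R  = 0.5 * numpy.eye(p)                      # observation error covariance
    BI = numpy.linalg.inv(B)
    RI = numpy.linalg.inv(R)
    HessienneI = []                              # Hessian of J, one column at a time
    for i in range(n):
        _ee = numpy.zeros((n, 1)) ; _ee[i] = 1.
        HessienneI.append(numpy.ravel(BI @ _ee + H.T @ (RI @ (H @ _ee))))
    A = numpy.linalg.inv(numpy.array(HessienneI))  # symmetric, so row/column order is harmless
    numpy.linalg.cholesky(A)                     # raises LinAlgError if A is not SPD
    K1 = B @ H.T @ numpy.linalg.inv(R + H @ B @ H.T)     # gain, observation-space inverse
    K2 = numpy.linalg.inv(BI + H.T @ RI @ H) @ H.T @ RI  # gain, state-space inverse
    assert numpy.allclose(K1, K2)
    return A, K1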
1620 # ==============================================================================
1621 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1622 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1623 """
1624 Maximum Likelihood Ensemble Filter
1625 """
1626 if selfA._parameters["EstimationOf"] == "Parameters":
1627 selfA._parameters["StoreInternalVariables"] = True
1631 H = HO["Direct"].appliedControledFormTo
1633 if selfA._parameters["EstimationOf"] == "State":
1634 M = EM["Direct"].appliedControledFormTo
1636 if CM is not None and "Tangent" in CM and U is not None:
1637 Cm = CM["Tangent"].asMatrix(Xb)
1638 else:
1639 Cm = None
1641 # Number of steps equal to the number of observation steps
1642 # ---------------------------------------------------------
1643 if hasattr(Y,"stepnumber"):
1644 duration = Y.stepnumber()
1645 __p = numpy.cumprod(Y.shape())[-1]
1646 else:
1647 duration = 2
1648 __p = numpy.array(Y).size
1650 # Precompute the inversions of B and R
1651 # ------------------------------------
1652 if selfA._parameters["StoreInternalVariables"] \
1653 or selfA._toStore("CostFunctionJ") \
1654 or selfA._toStore("CostFunctionJb") \
1655 or selfA._toStore("CostFunctionJo") \
1656 or selfA._toStore("CurrentOptimum") \
1657 or selfA._toStore("APosterioriCovariance"):
1658 BI = B.getI()
1659 RI = R.getI()
1663 __n = Xb.size
1664 __m = selfA._parameters["NumberOfMembers"]
1665 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1666 else: Pn = B
1667 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1668 else: Rn = R
1669 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1670 else: Qn = Q
1671 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1673 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1674 selfA.StoredVariables["Analysis"].store( Xb )
1675 if selfA._toStore("APosterioriCovariance"):
1676 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1679 previousJMinimum = numpy.finfo(float).max
1681 for step in range(duration-1):
1682 if hasattr(Y,"store"):
1683 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1684 else:
1685 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1687 if U is not None:
1688 if hasattr(U,"store") and len(U)>1:
1689 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1690 elif hasattr(U,"store") and len(U)==1:
1691 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1692 else:
1693 Un = numpy.asmatrix(numpy.ravel( U )).T
1694 else:
1695 Un = None
1697 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1698 Xn = CovarianceInflation( Xn,
1699 selfA._parameters["InflationType"],
1700 selfA._parameters["InflationFactor"],
1701 )
1703 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1704 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1705 argsAsSerie = True,
1706 returnSerieAsArrayMatrix = True )
1707 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1708 Xn_predicted = EMX + qi
1709 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
1710 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1711 Xn_predicted = Xn_predicted + Cm * Un
1712 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1713 # ---> By principle, M = Id, Q = 0
1714 Xn_predicted = Xn
1716 #--------------------------
1717 if VariantM == "MLEF13":
1718 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1719 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1720 Ua = numpy.identity(__m)
1721 __j = 0
1722 Deltaw = 1
1723 if not BnotT:
1724 Ta = numpy.identity(__m)
1725 vw = numpy.zeros(__m)
1726 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1727 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1729 if BnotT:
1730 E1 = vx1 + _epsilon * EaX
1731 else:
1732 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1734 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1735 argsAsSerie = True,
1736 returnSerieAsArrayMatrix = True )
1737 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1739 if BnotT:
1740 EaY = (HE2 - vy2) / _epsilon
1741 else:
1742 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1744 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1745 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1746 Deltaw = - numpy.linalg.solve(mH,GradJ)
1747 vw = vw + Deltaw
1749 if not BnotT:
1751 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1752 __j = __j + 1
1755 if BnotT:
1756 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1758 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1759 #--------------------------
1760 else:
1761 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1763 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1764 Xn = CovarianceInflation( Xn,
1765 selfA._parameters["InflationType"],
1766 selfA._parameters["InflationFactor"],
1767 )
1769 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1770 #--------------------------
1772 if selfA._parameters["StoreInternalVariables"] \
1773 or selfA._toStore("CostFunctionJ") \
1774 or selfA._toStore("CostFunctionJb") \
1775 or selfA._toStore("CostFunctionJo") \
1776 or selfA._toStore("APosterioriCovariance") \
1777 or selfA._toStore("InnovationAtCurrentAnalysis") \
1778 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1779 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1780 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1781 _Innovation = Ynpu - _HXa
1783 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1784 # ---> with the analysis
1785 selfA.StoredVariables["Analysis"].store( Xa )
1786 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1787 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1788 if selfA._toStore("InnovationAtCurrentAnalysis"):
1789 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1790 # ---> with the current state
1791 if selfA._parameters["StoreInternalVariables"] \
1792 or selfA._toStore("CurrentState"):
1793 selfA.StoredVariables["CurrentState"].store( Xn )
1794 if selfA._toStore("ForecastState"):
1795 selfA.StoredVariables["ForecastState"].store( EMX )
1796 if selfA._toStore("BMA"):
1797 selfA.StoredVariables["BMA"].store( EMX - Xa )
1798 if selfA._toStore("InnovationAtCurrentState"):
1799 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1800 if selfA._toStore("SimulatedObservationAtCurrentState") \
1801 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1802 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1804 if selfA._parameters["StoreInternalVariables"] \
1805 or selfA._toStore("CostFunctionJ") \
1806 or selfA._toStore("CostFunctionJb") \
1807 or selfA._toStore("CostFunctionJo") \
1808 or selfA._toStore("CurrentOptimum") \
1809 or selfA._toStore("APosterioriCovariance"):
1810 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1811 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1812 J = Jb + Jo
1813 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1814 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1815 selfA.StoredVariables["CostFunctionJ" ].store( J )
1817 if selfA._toStore("IndexOfOptimum") \
1818 or selfA._toStore("CurrentOptimum") \
1819 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1820 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1821 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1822 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1823 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1824 if selfA._toStore("IndexOfOptimum"):
1825 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1826 if selfA._toStore("CurrentOptimum"):
1827 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1828 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1829 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1830 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1831 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1832 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1833 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1834 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1835 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1836 if selfA._toStore("APosterioriCovariance"):
1837 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1838 if selfA._parameters["EstimationOf"] == "Parameters" \
1839 and J < previousJMinimum:
1840 previousJMinimum = J
1841 XaMin = Xa
1842 if selfA._toStore("APosterioriCovariance"):
1843 covarianceXaMin = Pn
1845 # Final additional storage of the optimum for parameter estimation
1846 # -----------------------------------------------------------------
1847 if selfA._parameters["EstimationOf"] == "Parameters":
1848 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1849 selfA.StoredVariables["Analysis"].store( XaMin )
1850 if selfA._toStore("APosterioriCovariance"):
1851 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1852 if selfA._toStore("BMA"):
1853 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1854 #
1855 return 0
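# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: one
# MLEF-type analysis step in ensemble weight space, with a linear observation
# operator chosen so that the Newton iteration of the "MLEF13" variant above
# converges almost immediately. Sizes and data are local assumptions.
def _exampleMlefWeightSpaceStep():
    import math, numpy, scipy.linalg
    numpy.random.seed(2)
    __n, __p, __m = 3, 2, 5                       # state, observation, members
    H  = numpy.random.rand(__p, __n)              # linear observation operator
    RI = numpy.eye(__p)                           # inverse observation covariance
    Xf = numpy.random.rand(__n, __m)              # forecast ensemble
    Ynpu = numpy.random.rand(__p, 1)              # current observation
    Xfm  = Xf.mean(axis=1).reshape((__n, 1))
    EaX  = (Xf - Xfm) / math.sqrt(__m - 1)        # normalized forecast anomalies
    vw   = numpy.zeros((__m, 1))                  # weights in ensemble space
    for _j in range(10):                          # Newton iterations on the weights
        vx1    = Xfm + EaX @ vw
        EaY    = H @ EaX                          # observed anomalies (linear case)
        GradJ  = vw - EaY.T @ (RI @ (Ynpu - H @ vx1))
        mH     = numpy.identity(__m) + EaY.T @ (RI @ EaY)
        Deltaw = -numpy.linalg.solve(mH, GradJ)
        vw = vw + Deltaw
        if numpy.linalg.norm(Deltaw) < 1.e-12: break
    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv(mH)))  # analysis transform
    Xa = (Xfm + EaX @ vw) + math.sqrt(__m - 1) * EaX @ Ta      # analysis ensemble
    return Xa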
1857 # ==============================================================================
1858 def mmqr(
1859 func = None,
1860 x0 = None,
1861 fprime = None,
1862 bounds = None,
1863 quantile = 0.5,
1864 maxfun = 15000,
1865 toler = 1.e-06,
1866 y = None,
1867 ):
1868 """
1869 Computer implementation of the MMQR algorithm, based on the publication:
1870 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1871 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1872 """
1874 # Retrieval of the data and initial information
1875 # ----------------------------------------------
1876 variables = numpy.ravel( x0 )
1877 mesures = numpy.ravel( y )
1878 increment = sys.float_info[0]
1879 p = variables.size
1880 n = mesures.size
1881 quantile = float(quantile)
1883 # Computation of the MM parameters
1884 # --------------------------------
1885 tn = float(toler) / n
1886 e0 = -tn / math.log(tn)
1887 epsilon = (e0-tn)/(1+math.log(e0))
1889 # Initialization computations
1890 # ---------------------------
1891 residus = mesures - numpy.ravel( func( variables ) )
1892 poids = 1./(epsilon+numpy.abs(residus))
1893 veps = 1. - 2. * quantile - residus * poids
1894 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1895 iteration = 0
1897 # Iterative search
1898 # ----------------
1899 while (increment > toler) and (iteration < maxfun) :
1900 iteration += 1
1902 Derivees = numpy.array(fprime(variables))
1903 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it goes through YACS pipes
1904 DeriveesT = Derivees.transpose()
1905 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1906 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
1907 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1909 variables = variables + step
1910 if bounds is not None:
1911 # Caution: avoid an infinite loop if an interval is too small
1912 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1913 step = step/2.
1914 variables = variables - step
1915 residus = mesures - numpy.ravel( func(variables) )
1916 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1918 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1919 step = step/2.
1920 variables = variables - step
1921 residus = mesures - numpy.ravel( func(variables) )
1922 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1924 increment = lastsurrogate-surrogate
1925 poids = 1./(epsilon+numpy.abs(residus))
1926 veps = 1. - 2. * quantile - residus * poids
1927 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1929 # Discrepancy measurement
1930 # -----------------------
1931 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1933 return variables, Ecart, [n,p,iteration,increment,0]
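# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: using
# the mmqr function above for a median (quantile 0.5) regression of a linear
# model y = a*x + b. The model, its Jacobian and the synthetic data are local
# assumptions of the example.
def _exampleMmqrMedianRegression():
    import numpy
    numpy.random.seed(3)
    absc = numpy.linspace(0., 1., 50)                       # abscissae of the data
    yobs = 2. * absc + 1. + 0.1 * numpy.random.randn(50)    # noisy observations
    func   = lambda coef: coef[0] * absc + coef[1]          # model evaluation
    fprime = lambda coef: numpy.vstack((absc, numpy.ones(50))).T  # its Jacobian
    coefs, ecart, infos = mmqr(
        func     = func,
        x0       = numpy.array([0., 0.]),
        fprime   = fprime,
        quantile = 0.5,
        y        = yobs,
        )
    return coefs, ecart, infos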
1935 # ==============================================================================
1936 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1937 """
1938 Multi-step and multi-method 3DVAR
1939 """
1943 Xn = numpy.ravel(Xb).reshape((-1,1))
1945 if selfA._parameters["EstimationOf"] == "State":
1946 M = EM["Direct"].appliedTo
1948 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1949 selfA.StoredVariables["Analysis"].store( Xn )
1950 if selfA._toStore("APosterioriCovariance"):
1951 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1952 else: Pn = B
1953 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1954 if selfA._toStore("ForecastState"):
1955 selfA.StoredVariables["ForecastState"].store( Xn )
1957 if hasattr(Y,"stepnumber"):
1958 duration = Y.stepnumber()
1959 else:
1960 duration = 2
1964 for step in range(duration-1):
1965 if hasattr(Y,"store"):
1966 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
1967 else:
1968 Ynpu = numpy.ravel( Y ).reshape((-1,1))
1970 if selfA._parameters["EstimationOf"] == "State": # Forecast
1971 Xn = selfA.StoredVariables["Analysis"][-1]
1972 Xn_predicted = M( Xn )
1973 if selfA._toStore("ForecastState"):
1974 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1975 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1976 # ---> By principle, M = Id, Q = 0
1977 Xn_predicted = Xn
1978 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
1980 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
1981 #
1982 return 0
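# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: the
# multi-step pattern of multi3dvar reduced to plain Python. The toy "oneCycle"
# (a scalar BLUE analysis) and the toy model "M" are local stand-ins for the
# ADAO operators and do not share their real signatures.
def _exampleMultiStepDriver():
    import numpy
    M = lambda x: 0.9 * x                      # toy evolution model
    def oneCycle(xb, y):                       # toy mono-step analysis (scalar BLUE)
        b, r = 1.0, 1.0                        # background and observation variances
        return xb + (b / (b + r)) * (y - xb)   # xb corrected by the scalar gain
    analyses = [numpy.array([1.0])]            # initial background as first "analysis"
    for Ynpu in (numpy.array([0.8]), numpy.array([0.7]), numpy.array([0.9])):
        Xn_predicted = M(analyses[-1])         # forecast from the last analysis
        analyses.append(oneCycle(Xn_predicted, Ynpu))
    return analyses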
1984 # ==============================================================================
1985 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1986 """
1987 3DVAR PSAS
1988 """
1994 Hm = HO["Direct"].appliedTo
1996 # Optional use of a precomputed H(Xb) vector
1997 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1998 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1999 else:
2000 HXb = Hm( Xb )
2001 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2002 if Y.size != HXb.size:
2003 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2004 if max(Y.shape) != max(HXb.shape):
2005 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2007 if selfA._toStore("JacobianMatrixAtBackground"):
2008 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2009 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2010 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2012 Ht = HO["Tangent"].asMatrix(Xb)
2013 BHT = B * Ht.T
2014 HBHTpR = R + Ht * BHT
2015 Innovation = Y - HXb
2017 # Starting point of the optimization
2018 Xini = numpy.zeros(Xb.shape)
2020 # Definition of the cost function
2021 # -------------------------------
2022 def CostFunction(w):
2023 _W = numpy.asmatrix(numpy.ravel( w )).T
2024 if selfA._parameters["StoreInternalVariables"] or \
2025 selfA._toStore("CurrentState") or \
2026 selfA._toStore("CurrentOptimum"):
2027 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2028 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2029 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2030 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2031 if selfA._toStore("InnovationAtCurrentState"):
2032 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2034 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2035 Jo = float( - _W.T * Innovation )
2036 J = Jb + Jo
2038 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2039 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2040 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2041 selfA.StoredVariables["CostFunctionJ" ].store( J )
2042 if selfA._toStore("IndexOfOptimum") or \
2043 selfA._toStore("CurrentOptimum") or \
2044 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2045 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2046 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2047 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2048 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2049 if selfA._toStore("IndexOfOptimum"):
2050 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2051 if selfA._toStore("CurrentOptimum"):
2052 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2053 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2054 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2055 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2056 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2057 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2058 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2059 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2060 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2061 return J
2063 def GradientOfCostFunction(w):
2064 _W = numpy.asmatrix(numpy.ravel( w )).T
2065 GradJb = HBHTpR * _W
2066 GradJo = - Innovation
2067 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2068 return GradJ
2070 # Minimization of the functional
2071 # ------------------------------
2072 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2074 if selfA._parameters["Minimizer"] == "LBFGSB":
2075 if "0.19" <= scipy.version.version <= "1.1.0":
2076 import lbfgsbhlt as optimiseur
2077 else:
2078 import scipy.optimize as optimiseur
2079 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2080 func = CostFunction,
2081 x0 = Xini,
2082 fprime = GradientOfCostFunction,
2083 args = (),
2084 bounds = selfA._parameters["Bounds"],
2085 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2086 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2087 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2088 iprint = selfA._parameters["optiprint"],
2089 )
2090 nfeval = Informations['funcalls']
2091 rc = Informations['warnflag']
2092 elif selfA._parameters["Minimizer"] == "TNC":
2093 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2094 func = CostFunction,
2095 x0 = Xini,
2096 fprime = GradientOfCostFunction,
2097 args = (),
2098 bounds = selfA._parameters["Bounds"],
2099 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2100 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2101 ftol = selfA._parameters["CostDecrementTolerance"],
2102 messages = selfA._parameters["optmessages"],
2103 )
2104 elif selfA._parameters["Minimizer"] == "CG":
2105 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2106 f = CostFunction,
2107 x0 = Xini,
2108 fprime = GradientOfCostFunction,
2109 args = (),
2110 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2111 gtol = selfA._parameters["GradientNormTolerance"],
2112 disp = selfA._parameters["optdisp"],
2113 full_output = True,
2114 )
2115 elif selfA._parameters["Minimizer"] == "NCG":
2116 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2117 f = CostFunction,
2118 x0 = Xini,
2119 fprime = GradientOfCostFunction,
2120 args = (),
2121 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2122 avextol = selfA._parameters["CostDecrementTolerance"],
2123 disp = selfA._parameters["optdisp"],
2124 full_output = True,
2125 )
2126 elif selfA._parameters["Minimizer"] == "BFGS":
2127 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2128 f = CostFunction,
2129 x0 = Xini,
2130 fprime = GradientOfCostFunction,
2131 args = (),
2132 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2133 gtol = selfA._parameters["GradientNormTolerance"],
2134 disp = selfA._parameters["optdisp"],
2135 full_output = True,
2136 )
2137 else:
2138 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2140 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2141 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2143 # Workaround for a TNC bug on the returned Minimum
2144 # -------------------------------------------------
2145 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2146 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2147 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2148 else:
2149 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2151 # Retrieve the analysis
2152 # ----------------------
2153 Xa = Minimum
2155 selfA.StoredVariables["Analysis"].store( Xa )
2157 if selfA._toStore("OMA") or \
2158 selfA._toStore("SigmaObs2") or \
2159 selfA._toStore("SimulationQuantiles") or \
2160 selfA._toStore("SimulatedObservationAtOptimum"):
2161 if selfA._toStore("SimulatedObservationAtCurrentState"):
2162 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2163 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2164 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2165 else:
2166 HXa = Hm( Xa )
2168 # Computation of the analysis covariance
2169 # ---------------------------------------
2170 if selfA._toStore("APosterioriCovariance") or \
2171 selfA._toStore("SimulationQuantiles") or \
2172 selfA._toStore("JacobianMatrixAtOptimum") or \
2173 selfA._toStore("KalmanGainAtOptimum"):
2174 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2175 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2176 if selfA._toStore("APosterioriCovariance") or \
2177 selfA._toStore("SimulationQuantiles") or \
2178 selfA._toStore("KalmanGainAtOptimum"):
2179 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2180 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2181 if selfA._toStore("APosterioriCovariance") or \
2182 selfA._toStore("SimulationQuantiles"):
2183 BI = B.getI()
2184 RI = R.getI()
2185 HessienneI = []
2186 nb = Xa.size
2187 for i in range(nb):
2188 _ee = numpy.matrix(numpy.zeros(nb)).T
2189 _ee[i] = 1.
2190 _HtEE = numpy.dot(HtM,_ee)
2191 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2192 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2193 HessienneI = numpy.matrix( HessienneI )
2194 A = HessienneI.I
2195 if min(A.shape) != max(A.shape):
2196 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2197 if (numpy.diag(A) < 0).any():
2198 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2199 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2200 try:
2201 L = numpy.linalg.cholesky( A )
2202 except:
2203 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2204 if selfA._toStore("APosterioriCovariance"):
2205 selfA.StoredVariables["APosterioriCovariance"].store( A )
2206 if selfA._toStore("JacobianMatrixAtOptimum"):
2207 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2208 if selfA._toStore("KalmanGainAtOptimum"):
2209 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2210 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2211 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2213 # Additional calculations and/or storage
2214 # ---------------------------------------
2215 if selfA._toStore("Innovation") or \
2216 selfA._toStore("SigmaObs2") or \
2217 selfA._toStore("MahalanobisConsistency") or \
2218 selfA._toStore("OMB"):
2219 d = Y - HXb
2220 if selfA._toStore("Innovation"):
2221 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2222 if selfA._toStore("BMA"):
2223 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2224 if selfA._toStore("OMA"):
2225 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2226 if selfA._toStore("OMB"):
2227 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2228 if selfA._toStore("SigmaObs2"):
2229 TraceR = R.trace(Y.size)
2230 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2231 if selfA._toStore("MahalanobisConsistency"):
2232 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2233 if selfA._toStore("SimulationQuantiles"):
2234 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2235 HXa = numpy.matrix(numpy.ravel( HXa )).T
2236 EXr = None
2237 YfQ = None
2238 for i in range(nech):
2239 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2240 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2241 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2242 Yr = HXa + dYr
2243 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2244 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2245 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2246 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2247 if YfQ is None:
2248 YfQ = Yr
2249 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2250 else:
2251 YfQ = numpy.hstack((YfQ,Yr))
2252 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2253 YfQ.sort(axis=-1)
2254 YQ = None
2255 for quantile in selfA._parameters["Quantiles"]:
2256 if not (0. <= float(quantile) <= 1.): continue
2257 indice = int(nech * float(quantile) - 1./nech)
2258 if YQ is None: YQ = YfQ[:,indice]
2259 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2260 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2261 if selfA._toStore("SampledStateForQuantiles"):
2262 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2263 if selfA._toStore("SimulatedObservationAtBackground"):
2264 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2265 if selfA._toStore("SimulatedObservationAtOptimum"):
2266 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2267 #
2268 return 0
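# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: the PSAS
# analysis solves in observation (dual) space, w = (R + H B H^T)^-1 (y - H xb),
# then maps back with Xa = Xb + B H^T w. On a linear toy problem this matches
# the primal 3DVAR analysis, as checked below with local example data.
def _examplePsasDualPrimalEquivalence():
    import numpy
    numpy.random.seed(5)
    n, p = 4, 2
    H  = numpy.random.rand(p, n)
    B  = numpy.eye(n) ; R = numpy.eye(p)
    xb = numpy.random.rand(n) ; y = numpy.random.rand(p)
    d  = y - H @ xb                                        # innovation
    w  = numpy.linalg.solve(R + H @ B @ H.T, d)            # dual-space solution
    xa_dual = xb + B @ H.T @ w
    BI = numpy.linalg.inv(B) ; RI = numpy.linalg.inv(R)
    xa_primal = xb + numpy.linalg.solve(BI + H.T @ RI @ H, H.T @ RI @ d)
    assert numpy.allclose(xa_dual, xa_primal)
    return xa_dual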
2270 # ==============================================================================
2271 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2272 """
2273 Stochastic EnKF
2274 """
2275 if selfA._parameters["EstimationOf"] == "Parameters":
2276 selfA._parameters["StoreInternalVariables"] = True
2279 H = HO["Direct"].appliedControledFormTo
2281 if selfA._parameters["EstimationOf"] == "State":
2282 M = EM["Direct"].appliedControledFormTo
2284 if CM is not None and "Tangent" in CM and U is not None:
2285 Cm = CM["Tangent"].asMatrix(Xb)
2286 else:
2287 Cm = None
2289 # Observation duration and sizes
2290 if hasattr(Y,"stepnumber"):
2291 duration = Y.stepnumber()
2292 __p = numpy.cumprod(Y.shape())[-1]
2293 else:
2294 duration = 2
2295 __p = numpy.array(Y).size
2297 # Precompute the inversions of B and R
2298 if selfA._parameters["StoreInternalVariables"] \
2299 or selfA._toStore("CostFunctionJ") \
2300 or selfA._toStore("CostFunctionJb") \
2301 or selfA._toStore("CostFunctionJo") \
2302 or selfA._toStore("CurrentOptimum") \
2303 or selfA._toStore("APosterioriCovariance"):
2304 BI = B.getI()
2305 RI = R.getI()
2307 __n = Xb.size
2308 __m = selfA._parameters["NumberOfMembers"]
2310 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2311 else: Pn = B
2312 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2313 else: Rn = R
2314 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2315 else: Qn = Q
2316 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2318 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2319 selfA.StoredVariables["Analysis"].store( Xb )
2320 if selfA._toStore("APosterioriCovariance"):
2321 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2324 previousJMinimum = numpy.finfo(float).max
2326 for step in range(duration-1):
2327 if hasattr(Y,"store"):
2328 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2329 else:
2330 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2332 if U is not None:
2333 if hasattr(U,"store") and len(U)>1:
2334 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2335 elif hasattr(U,"store") and len(U)==1:
2336 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2337 else:
2338 Un = numpy.asmatrix(numpy.ravel( U )).T
2339 else:
2340 Un = None
2342 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2343 Xn = CovarianceInflation( Xn,
2344 selfA._parameters["InflationType"],
2345 selfA._parameters["InflationFactor"],
2346 )
2348 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2349 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2350 argsAsSerie = True,
2351 returnSerieAsArrayMatrix = True )
2352 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2353 Xn_predicted = EMX + qi
2354 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2355 argsAsSerie = True,
2356 returnSerieAsArrayMatrix = True )
2357 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
2358 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2359 Xn_predicted = Xn_predicted + Cm * Un
2360 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2361 # ---> By principle, M = Id, Q = 0
2362 Xn_predicted = Xn
2363 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2364 argsAsSerie = True,
2365 returnSerieAsArrayMatrix = True )
2367 # Mean of forecast and observation of forecast
2368 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2369 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2371 #--------------------------
2372 if VariantM == "KalmanFilterFormula05":
2373 PfHT, HPfHT = 0., 0.
2374 for i in range(__m):
2375 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2376 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2377 PfHT += Exfi * Eyfi.T
2378 HPfHT += Eyfi * Eyfi.T
2379 PfHT = (1./(__m-1)) * PfHT
2380 HPfHT = (1./(__m-1)) * HPfHT
2381 Kn = PfHT * ( R + HPfHT ).I
2384 for i in range(__m):
2385 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2386 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2387 #--------------------------
2388 elif VariantM == "KalmanFilterFormula16":
2389 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2390 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2392 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2393 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2395 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2397 for i in range(__m):
2398 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2399 #--------------------------
2400 else:
2401 raise ValueError("VariantM has to be chosen in the authorized methods list.")
2403 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2404 Xn = CovarianceInflation( Xn,
2405 selfA._parameters["InflationType"],
2406 selfA._parameters["InflationFactor"],
2407 )
2409 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2410 #--------------------------
2412 if selfA._parameters["StoreInternalVariables"] \
2413 or selfA._toStore("CostFunctionJ") \
2414 or selfA._toStore("CostFunctionJb") \
2415 or selfA._toStore("CostFunctionJo") \
2416 or selfA._toStore("APosterioriCovariance") \
2417 or selfA._toStore("InnovationAtCurrentAnalysis") \
2418 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2419 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2420 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2421 _Innovation = Ynpu - _HXa
2423 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2424 # ---> with the analysis
2425 selfA.StoredVariables["Analysis"].store( Xa )
2426 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2427 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2428 if selfA._toStore("InnovationAtCurrentAnalysis"):
2429 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2430 # ---> with the current state
2431 if selfA._parameters["StoreInternalVariables"] \
2432 or selfA._toStore("CurrentState"):
2433 selfA.StoredVariables["CurrentState"].store( Xn )
2434 if selfA._toStore("ForecastState"):
2435 selfA.StoredVariables["ForecastState"].store( EMX )
2436 if selfA._toStore("BMA"):
2437 selfA.StoredVariables["BMA"].store( EMX - Xa )
2438 if selfA._toStore("InnovationAtCurrentState"):
2439 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2440 if selfA._toStore("SimulatedObservationAtCurrentState") \
2441 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2442 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2444 if selfA._parameters["StoreInternalVariables"] \
2445 or selfA._toStore("CostFunctionJ") \
2446 or selfA._toStore("CostFunctionJb") \
2447 or selfA._toStore("CostFunctionJo") \
2448 or selfA._toStore("CurrentOptimum") \
2449 or selfA._toStore("APosterioriCovariance"):
2450 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2451 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2452 J = Jb + Jo
2453 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2454 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2455 selfA.StoredVariables["CostFunctionJ" ].store( J )
2457 if selfA._toStore("IndexOfOptimum") \
2458 or selfA._toStore("CurrentOptimum") \
2459 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2460 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2461 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2462 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2463 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2464 if selfA._toStore("IndexOfOptimum"):
2465 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2466 if selfA._toStore("CurrentOptimum"):
2467 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2468 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2469 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2470 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2471 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2472 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2473 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2474 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2475 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2476 if selfA._toStore("APosterioriCovariance"):
2477 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2478 if selfA._parameters["EstimationOf"] == "Parameters" \
2479 and J < previousJMinimum:
2480 previousJMinimum = J
2481 XaMin = Xa
2482 if selfA._toStore("APosterioriCovariance"):
2483 covarianceXaMin = Pn
2485 # Final additional storage of the optimum for parameter estimation
2486 # -----------------------------------------------------------------
2487 if selfA._parameters["EstimationOf"] == "Parameters":
2488 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2489 selfA.StoredVariables["Analysis"].store( XaMin )
2490 if selfA._toStore("APosterioriCovariance"):
2491 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2492 if selfA._toStore("BMA"):
2493 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2494 #
2495 return 0
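# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: one
# stochastic EnKF update following the "KalmanFilterFormula05" variant above,
# with a linear observation operator. The gain is built from the ensemble
# covariances and the observations are perturbed member by member. Sizes and
# data are local assumptions.
def _exampleStochasticEnkfStep():
    import numpy
    numpy.random.seed(6)
    __n, __p, __m = 3, 2, 20
    H    = numpy.random.rand(__p, __n)                  # linear observation operator
    Rn   = 0.1 * numpy.eye(__p)                         # observation error covariance
    Xf   = numpy.random.rand(__n, __m)                  # forecast ensemble
    Ynpu = numpy.random.rand(__p, 1)                    # current observation
    HXf  = H @ Xf
    Xfm  = Xf.mean(axis=1).reshape((__n, 1))
    Hfm  = HXf.mean(axis=1).reshape((__p, 1))
    PfHT, HPfHT = 0., 0.
    for i in range(__m):                                # ensemble covariance estimates
        Exfi = Xf[:, i].reshape((__n, 1)) - Xfm
        Eyfi = HXf[:, i].reshape((__p, 1)) - Hfm
        PfHT  += Exfi @ Eyfi.T
        HPfHT += Eyfi @ Eyfi.T
    PfHT, HPfHT = PfHT / (__m - 1), HPfHT / (__m - 1)
    Kn = PfHT @ numpy.linalg.inv(Rn + HPfHT)            # ensemble Kalman gain
    Xa = Xf.copy()
    for i in range(__m):                                # perturbed-observation update
        ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
        Xa[:, i] = Xf[:, i] + Kn @ (numpy.ravel(Ynpu) + ri - HXf[:, i])
    return Xa.mean(axis=1)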
2497 # ==============================================================================
2498 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2499 """
2500 3DVAR
2501 """
2507 Hm = HO["Direct"].appliedTo
2508 Ha = HO["Adjoint"].appliedInXTo
2510 # Optional use of a precomputed H(Xb) vector
2511 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2512 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2513 else:
2514 HXb = Hm( Xb )
2515 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2516 if Y.size != HXb.size:
2517 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2518 if max(Y.shape) != max(HXb.shape):
2519 raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2521 if selfA._toStore("JacobianMatrixAtBackground"):
2522 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2523 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2524 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2526 # Precompute the inversions of B and R
2527 BI = B.getI()
2528 RI = R.getI()
2530 # Starting point of the optimization
2531 Xini = selfA._parameters["InitializationPoint"]
2533 # Definition of the cost function
2534 # -------------------------------
2535 def CostFunction(x):
2536 _X = numpy.asmatrix(numpy.ravel( x )).T
2537 if selfA._parameters["StoreInternalVariables"] or \
2538 selfA._toStore("CurrentState") or \
2539 selfA._toStore("CurrentOptimum"):
2540 selfA.StoredVariables["CurrentState"].store( _X )
2541 _HX = Hm( _X )
2542 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2543 _Innovation = Y - _HX
2544 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2545 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2546 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2547 if selfA._toStore("InnovationAtCurrentState"):
2548 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2550 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2551 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2552 J = Jb + Jo
2554 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2555 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2556 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2557 selfA.StoredVariables["CostFunctionJ" ].store( J )
2558 if selfA._toStore("IndexOfOptimum") or \
2559 selfA._toStore("CurrentOptimum") or \
2560 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2561 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2562 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2563 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2564 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2565 if selfA._toStore("IndexOfOptimum"):
2566 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2567 if selfA._toStore("CurrentOptimum"):
2568 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2569 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2570 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2571 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2572 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2573 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2574 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2575 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2576 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2577 return J
2579 def GradientOfCostFunction(x):
2580 _X = numpy.asmatrix(numpy.ravel( x )).T
2581 _HX = Hm( _X )
2582 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2583 GradJb = BI * (_X - Xb)
2584 GradJo = - Ha( (_X, RI * (Y - _HX)) )
2585 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2586 return GradJ
2588 # Minimization of the functional
2589 # ------------------------------
2590 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2592 if selfA._parameters["Minimizer"] == "LBFGSB":
2593 if "0.19" <= scipy.version.version <= "1.1.0":
2594 import lbfgsbhlt as optimiseur
2595 else:
2596 import scipy.optimize as optimiseur
2597 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2598 func = CostFunction,
2599 x0 = Xini,
2600 fprime = GradientOfCostFunction,
2601 args = (),
2602 bounds = selfA._parameters["Bounds"],
2603 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2604 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2605 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2606 iprint = selfA._parameters["optiprint"],
2607 )
2608 nfeval = Informations['funcalls']
2609 rc = Informations['warnflag']
2610 elif selfA._parameters["Minimizer"] == "TNC":
2611 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2612 func = CostFunction,
2613 x0 = Xini,
2614 fprime = GradientOfCostFunction,
2615 args = (),
2616 bounds = selfA._parameters["Bounds"],
2617 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2618 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2619 ftol = selfA._parameters["CostDecrementTolerance"],
2620 messages = selfA._parameters["optmessages"],
2621 )
2622 elif selfA._parameters["Minimizer"] == "CG":
2623 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2624 f = CostFunction,
2625 x0 = Xini,
2626 fprime = GradientOfCostFunction,
2627 args = (),
2628 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2629 gtol = selfA._parameters["GradientNormTolerance"],
2630 disp = selfA._parameters["optdisp"],
2631 full_output = True,
2632 )
2633 elif selfA._parameters["Minimizer"] == "NCG":
2634 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2635 f = CostFunction,
2636 x0 = Xini,
2637 fprime = GradientOfCostFunction,
2638 args = (),
2639 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2640 avextol = selfA._parameters["CostDecrementTolerance"],
2641 disp = selfA._parameters["optdisp"],
2642 full_output = True,
2643 )
2644 elif selfA._parameters["Minimizer"] == "BFGS":
2645 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2646 f = CostFunction,
2647 x0 = Xini,
2648 fprime = GradientOfCostFunction,
2649 args = (),
2650 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2651 gtol = selfA._parameters["GradientNormTolerance"],
2652 disp = selfA._parameters["optdisp"],
2653 full_output = True,
2654 )
2655 else:
2656 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2658 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2659 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2661 # Workaround for a TNC bug on the returned Minimum
2662 # -------------------------------------------------
2663 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2664 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2666 # Retrieve the analysis
2667 # ----------------------
2668 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2670 selfA.StoredVariables["Analysis"].store( Xa )
2672 if selfA._toStore("OMA") or \
2673 selfA._toStore("SigmaObs2") or \
2674 selfA._toStore("SimulationQuantiles") or \
2675 selfA._toStore("SimulatedObservationAtOptimum"):
2676 if selfA._toStore("SimulatedObservationAtCurrentState"):
2677 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2678 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2679 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2680 else:
2681 HXa = Hm( Xa )
2683 # Computation of the analysis covariance
2684 # ---------------------------------------
2685 if selfA._toStore("APosterioriCovariance") or \
2686 selfA._toStore("SimulationQuantiles") or \
2687 selfA._toStore("JacobianMatrixAtOptimum") or \
2688 selfA._toStore("KalmanGainAtOptimum"):
2689 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2690 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2691 if selfA._toStore("APosterioriCovariance") or \
2692 selfA._toStore("SimulationQuantiles") or \
2693 selfA._toStore("KalmanGainAtOptimum"):
2694 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2695 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2696 if selfA._toStore("APosterioriCovariance") or \
2697 selfA._toStore("SimulationQuantiles"):
2698 HessienneI = []
2699 nb = Xa.size
2700 for i in range(nb):
2701 _ee = numpy.matrix(numpy.zeros(nb)).T
2702 _ee[i] = 1.
2703 _HtEE = numpy.dot(HtM,_ee)
2704 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2705 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2706 HessienneI = numpy.matrix( HessienneI )
2707 A = HessienneI.I
2708 if min(A.shape) != max(A.shape):
2709 raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2710 if (numpy.diag(A) < 0).any():
2711 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2712 if logging.getLogger().level < logging.WARNING: # The check is only done in debug mode
2713 try:
2714 L = numpy.linalg.cholesky( A )
2715 except:
2716 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2717 if selfA._toStore("APosterioriCovariance"):
2718 selfA.StoredVariables["APosterioriCovariance"].store( A )
2719 if selfA._toStore("JacobianMatrixAtOptimum"):
2720 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2721 if selfA._toStore("KalmanGainAtOptimum"):
2722 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2723 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2724 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2726 # Additional calculations and/or storage
2727 # ---------------------------------------
2728 if selfA._toStore("Innovation") or \
2729 selfA._toStore("SigmaObs2") or \
2730 selfA._toStore("MahalanobisConsistency") or \
2731 selfA._toStore("OMB"):
2732 d = Y - HXb
2733 if selfA._toStore("Innovation"):
2734 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2735 if selfA._toStore("BMA"):
2736 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2737 if selfA._toStore("OMA"):
2738 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2739 if selfA._toStore("OMB"):
2740 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2741 if selfA._toStore("SigmaObs2"):
2742 TraceR = R.trace(Y.size)
2743 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2744 if selfA._toStore("MahalanobisConsistency"):
2745 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2746 if selfA._toStore("SimulationQuantiles"):
2747 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2748 HXa = numpy.matrix(numpy.ravel( HXa )).T
2749 EXr = None
2750 YfQ = None
2751 for i in range(nech):
2752 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2753 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2754 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2755 Yr = HXa + dYr
2756 if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
2757 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2758 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2759 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2760 if YfQ is None:
2761 YfQ = Yr
2762 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
2763 else:
2764 YfQ = numpy.hstack((YfQ,Yr))
2765 if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
2766 YfQ.sort(axis=-1)
2767 YQ = None
2768 for quantile in selfA._parameters["Quantiles"]:
2769 if not (0. <= float(quantile) <= 1.): continue
2770 indice = int(nech * float(quantile) - 1./nech)
2771 if YQ is None: YQ = YfQ[:,indice]
2772 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2773 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2774 if selfA._toStore("SampledStateForQuantiles"):
2775 selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
2776 if selfA._toStore("SimulatedObservationAtBackground"):
2777 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2778 if selfA._toStore("SimulatedObservationAtOptimum"):
2779 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2780 #
2781 return 0
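# ------------------------------------------------------------------------------
# Illustrative sketch, added for clarity and not part of the ADAO API: the
# std3dvar cost function and gradient on a toy linear problem, minimized with
# the same scipy.optimize.fmin_l_bfgs_b entry point as above. All data are
# local assumptions of the example.
def _exampleToy3dvarMinimization():
    import numpy, scipy.optimize
    numpy.random.seed(7)
    n, p = 4, 3
    H  = numpy.random.rand(p, n)                 # toy linear observation operator
    Xb = numpy.random.rand(n)                    # background state
    Y  = numpy.random.rand(p)                    # observations
    BI = numpy.eye(n) ; RI = numpy.eye(p)        # inverses of B and R
    def CostFunction(x):
        _Innovation = Y - H @ x
        Jb = 0.5 * float((x - Xb).T @ BI @ (x - Xb))
        Jo = 0.5 * float(_Innovation.T @ RI @ _Innovation)
        return Jb + Jo
    def GradientOfCostFunction(x):
        return BI @ (x - Xb) - H.T @ (RI @ (Y - H @ x))
    Xa, MinJ, infos = scipy.optimize.fmin_l_bfgs_b(
        func   = CostFunction,
        x0     = Xb,                             # start from the background
        fprime = GradientOfCostFunction,
        )
    return Xa, MinJ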
2783 # ==============================================================================
2784 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2785 """
2786 4DVAR
2787 """
2793 Hm = HO["Direct"].appliedControledFormTo
2794 Mm = EM["Direct"].appliedControledFormTo
2796 if CM is not None and "Tangent" in CM and U is not None:
2797 Cm = CM["Tangent"].asMatrix(Xb)
2798 else:
2799 Cm = None
2800 #
2801 def Un(_step):
2802 if U is not None:
2803 if hasattr(U,"store") and 1<=_step<len(U) :
2804 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2805 elif hasattr(U,"store") and len(U)==1:
2806 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2807 else:
2808 _Un = numpy.asmatrix(numpy.ravel( U )).T
2809 else:
2810 _Un = None
2811 return _Un
2812 def CmUn(_xn,_un):
2813 if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
2814 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2815 _CmUn = _Cm * _un
2816 else:
2817 _CmUn = 0.
2818 return _CmUn
2820 # Remark: the observations are taken into account from time step number 1,
2821 # and are stored in Yo according to these indices. Step 0 is therefore not
2822 # used, since the first stage starts with the observation of step 1.
2825 # Number of steps equal to the number of observation steps
2826 if hasattr(Y,"stepnumber"):
2827 duration = Y.stepnumber()
2828 else:
2829 duration = 2
2831 # Precompute the inversions of B and R
2832 BI = B.getI()
2833 RI = R.getI()
2835 # Starting point of the optimization
2836 Xini = selfA._parameters["InitializationPoint"]
2838 # Definition of the cost function
2839 # -------------------------------
2840 selfA.DirectCalculation = [None,] # Step 0 is not observed
2841 selfA.DirectInnovation = [None,] # Step 0 is not observed
2842 def CostFunction(x):
2843 _X = numpy.asmatrix(numpy.ravel( x )).T
2844 if selfA._parameters["StoreInternalVariables"] or \
2845 selfA._toStore("CurrentState") or \
2846 selfA._toStore("CurrentOptimum"):
2847 selfA.StoredVariables["CurrentState"].store( _X )
2848 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2849 selfA.DirectCalculation = [None,]
2850 selfA.DirectInnovation = [None,]
2851 Jo = 0.
2852 _Xn = _X
2853 for step in range(0,duration-1):
2854 if hasattr(Y,"store"):
2855 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2856 else:
2857 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2858 #
2859 # Evolution step
2860 _Un = Un(step)
2861 if selfA._parameters["EstimationOf"] == "State":
2862 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2863 elif selfA._parameters["EstimationOf"] == "Parameters":
2864 pass
2866 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2867 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2868 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2870 # Difference-to-observations step
2871 if selfA._parameters["EstimationOf"] == "State":
2872 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2873 elif selfA._parameters["EstimationOf"] == "Parameters":
2874 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2876 # Storage of the state
2877 selfA.DirectCalculation.append( _Xn )
2878 selfA.DirectInnovation.append( _YmHMX )
2880 # Addition to the observation functional
2881 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2883 J = Jb + Jo
2884 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2885 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2886 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2887 selfA.StoredVariables["CostFunctionJ" ].store( J )
2888 if selfA._toStore("IndexOfOptimum") or \
2889 selfA._toStore("CurrentOptimum") or \
2890 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2891 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2892 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2893 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2894 if selfA._toStore("IndexOfOptimum"):
2895 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2896 if selfA._toStore("CurrentOptimum"):
2897 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2898 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2899 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2900 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2901 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2902 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2903 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2904 return J
2906 def GradientOfCostFunction(x):
2907 _X = numpy.asmatrix(numpy.ravel( x )).T
2908 GradJb = BI * (_X - Xb)
2909 GradJo = 0.
2910 for step in range(duration-1,0,-1):
2911 # Retrieve the last stored evolution state
2912 _Xn = selfA.DirectCalculation.pop()
2913 # Retrieve the last stored innovation
2914 _YmHMX = selfA.DirectInnovation.pop()
2915 # Computation of the adjoints
2916 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2917 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2918 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2919 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2920 # Gradient computation via the adjoint state
2921 GradJo = GradJo + Ha * RI * _YmHMX # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
2922 GradJo = Ma * GradJo # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
2923 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2924 return GradJ
2926 # Minimization of the functional
2927 # ------------------------------
2928 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2930 if selfA._parameters["Minimizer"] == "LBFGSB":
2931 if "0.19" <= scipy.version.version <= "1.1.0":
2932 import lbfgsbhlt as optimiseur
2934 import scipy.optimize as optimiseur
2935 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2936 func = CostFunction,
2938 fprime = GradientOfCostFunction,
2940 bounds = selfA._parameters["Bounds"],
2941 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2942 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2943 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2944 iprint = selfA._parameters["optiprint"],
2946 nfeval = Informations['funcalls']
2947 rc = Informations['warnflag']
2948 elif selfA._parameters["Minimizer"] == "TNC":
2949 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2950 func = CostFunction,
2952 fprime = GradientOfCostFunction,
2954 bounds = selfA._parameters["Bounds"],
2955 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2956 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2957 ftol = selfA._parameters["CostDecrementTolerance"],
2958 messages = selfA._parameters["optmessages"],
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieving the analysis
    # -----------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    #
    return 0
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precompute the inversions of B and R
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
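    #
    # The minimization is preconditioned by B: the control variable is v,
    # with x = Xb + B v, so the starting point v = 0 corresponds to x = Xb.
    # In this formulation Jb(v) = 0.5 * v^T B v, which avoids inverting B
    # (hence the name of the function).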
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
            if selfA._toStore("IndexOfOptimum"):
                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
            if selfA._toStore("CurrentOptimum"):
                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
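    #
    # Definition of the gradient of the cost function: the background term is
    # differentiated in control space (GradJb = B^T v), while the observation
    # term is obtained through the adjoint of the observation operator applied
    # to the R-weighted innovation.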
    def GradientOfCostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
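    # Note: the bounds passed to the bounded minimizers below apply to the
    # control variable v, not directly to the state x = Xb + B v.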
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
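    # The minimizer returns the optimal control v; it is mapped back to the
    # state space below, either by re-reading the stored optimal state or by
    # applying x = Xb + B v.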
    # Workaround for a TNC bug on the returned Minimum
    # ------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieving the analysis
    # -----------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    #
    # Computation of the analysis covariance
    # --------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
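        # The a posteriori covariance A is obtained as the inverse of the cost
        # function Hessian, assembled column by column: for each unit vector
        # e_i, the column is (B^{-1} + H^T R^{-1} H) e_i, so that
        # A = (B^{-1} + H^T R^{-1} H)^{-1}.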
        BI = B.getI()
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check only happens in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
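        # Two algebraically equivalent forms of the Kalman gain: the "primal"
        # form K = B H^T (R + H B H^T)^{-1} when there are no more observations
        # than state variables, and the "dual" (information) form
        # K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1} otherwise, so that the
        # smaller of the two matrices is inverted.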
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
    #
    # Supplementary calculations and/or storage
    # -----------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
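    # SigmaObs2 is a scalar diagnostic of the observation error amplitude,
    # estimated as d^T (y - H xa) / Tr(R); MahalanobisConsistency checks that
    # 2 J_min / d.size is close to 1 for a statistically consistent system.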
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
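        # Quantiles are estimated by Monte-Carlo sampling around the analysis:
        # either a linearized propagation of state perturbations through the
        # tangent operator HtM ("Linear"), or a full re-evaluation of the
        # observation operator on perturbed states ("NonLinear").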
        YfQ = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
                if selfA._toStore("SampledStateForQuantiles"): Xr = Xa+dXr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
            else:
                YfQ = numpy.hstack((YfQ,Yr))
                if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
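        # Empirical quantile extraction: the sampled observations are sorted
        # component-wise, and for each requested quantile the corresponding
        # order statistic is selected from the sorted sample.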
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
        if selfA._toStore("SampledStateForQuantiles"):
            selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
    #
    return 0
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')