1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
Defines generic numerical objects.
26 __author__ = "Jean-Philippe ARGAUD"
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38 assert len(triplet) == 3, "Incorrect number of arguments"
39 X, xArgs, funcrepr = triplet
40 __X = numpy.asmatrix(numpy.ravel( X )).T
41 __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42 __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43 __fonction = getattr(__module,funcrepr["__userFunction__name"])
44 sys.path = __sys_path_tmp ; del __sys_path_tmp
45 if isinstance(xArgs, dict):
46 __HX = __fonction( __X, **xArgs )
48 __HX = __fonction( __X )
49 return numpy.ravel( __HX )
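# Illustrative sketch (not part of the ADAO API): how the "triplet" argument of
# ExecuteFunction can be assembled for a user function. The path, module and
# function names are hypothetical placeholders supplied by the caller.
def _exampleBuildTriplet( X, xArgs, path, modl, name ):
    "Hedged helper: build the (X, xArgs, funcrepr) triplet expected by ExecuteFunction."
    funcrepr = {
        "__userFunction__path" : path,   # directory containing the user module
        "__userFunction__modl" : modl,   # module name, without the .py suffix
        "__userFunction__name" : name,   # name of the function inside that module
        }
    return (X, xArgs, funcrepr)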
51 # ==============================================================================
52 class FDApproximation(object):
This class serves as an interface to define approximated operators. When
an object is created by supplying a function "Function", the resulting
object provides three methods: "DirectOperator", "TangentOperator" and
"AdjointOperator". The finite-difference (FD) approximation is controlled
by the multiplicative increment "increment" (1% by default), or by the
fixed increment "dX" which is then multiplied by "increment" (hence in %),
and centered finite differences are used if the boolean "centeredDF" is true.
63 name = "FDApproximation",
68 extraArguments = None,
69 avoidingRedundancy = True,
70 toleranceInRedundancy = 1.e-18,
71 lenghtOfRedundancy = -1,
76 self.__name = str(name)
77 self.__extraArgs = extraArguments
80 import multiprocessing
81 self.__mpEnabled = True
83 self.__mpEnabled = False
85 self.__mpEnabled = False
86 self.__mpWorkers = mpWorkers
87 if self.__mpWorkers is not None and self.__mpWorkers < 1:
88 self.__mpWorkers = None
89 logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92 self.__mfEnabled = True
94 self.__mfEnabled = False
95 logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
97 if avoidingRedundancy:
99 self.__tolerBP = float(toleranceInRedundancy)
100 self.__lenghtRJ = int(lenghtOfRedundancy)
101 self.__listJPCP = [] # Jacobian Previous Calculated Points
102 self.__listJPCI = [] # Jacobian Previous Calculated Increment
103 self.__listJPCR = [] # Jacobian Previous Calculated Results
104 self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
105 self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
107 self.__avoidRC = False
110 if isinstance(Function,types.FunctionType):
111 logging.debug("FDA Calculs en multiprocessing : FunctionType")
112 self.__userFunction__name = Function.__name__
114 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
116 mod = os.path.abspath(Function.__globals__['__file__'])
117 if not os.path.isfile(mod):
118 raise ImportError("No user defined function or method found with the name %s"%(mod,))
119 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
120 self.__userFunction__path = os.path.dirname(mod)
122 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
124 elif isinstance(Function,types.MethodType):
125 logging.debug("FDA Calculs en multiprocessing : MethodType")
126 self.__userFunction__name = Function.__name__
128 mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
130 mod = os.path.abspath(Function.__func__.__globals__['__file__'])
131 if not os.path.isfile(mod):
132 raise ImportError("No user defined function or method found with the name %s"%(mod,))
133 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
134 self.__userFunction__path = os.path.dirname(mod)
136 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
self.__userFunction = self.__userOperator.appliedTo # For the direct computation
139 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
141 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142 self.__userFunction = self.__userOperator.appliedTo
144 self.__centeredDF = bool(centeredDF)
145 if abs(float(increment)) > 1.e-15:
146 self.__increment = float(increment)
148 self.__increment = 0.01
152 self.__dX = numpy.asmatrix(numpy.ravel( dX )).T
153 logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
155 logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
157 # ---------------------------------------------------------
158 def __doublon__(self, e, l, n, v=None):
159 __ac, __iac = False, -1
160 for i in range(len(l)-1,-1,-1):
161 if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
162 __ac, __iac = True, i
163 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
167 # ---------------------------------------------------------
168 def DirectOperator(self, X, **extraArgs ):
Computation of the direct operator using the supplied function.

NB: the extraArgs are present to keep the calling signature compatible,
but they must not be passed here to the user function.
175 logging.debug("FDA Calcul DirectOperator (explicite)")
177 _HX = self.__userFunction( X, argsAsSerie = True )
179 _X = numpy.asmatrix(numpy.ravel( X )).T
180 _HX = numpy.ravel(self.__userFunction( _X ))
184 # ---------------------------------------------------------
185 def TangentMatrix(self, X ):
Computation of the tangent operator as the Jacobian obtained by finite
differences, i.e. the gradient of H at X. Directional finite differences
are used around the point X. X is a numpy.matrix.

Centered finite differences (2nd-order approximation):
1/ For each component i of X, the perturbation dX[i] is added to and
subtracted from the component X[i] to build X_plus_dXi and X_moins_dXi,
and the responses HX_plus_dXi = H( X_plus_dXi ) and HX_moins_dXi =
H( X_moins_dXi ) are computed
2/ The differences (HX_plus_dXi-HX_moins_dXi) are formed and divided by
2*dX[i]
3/ Each result, component by component, becomes a column of the Jacobian

Non-centered finite differences (1st-order approximation):
1/ For each component i of X, the perturbation dX[i] is added to the
component X[i] to build X_plus_dXi, and the response
HX_plus_dXi = H( X_plus_dXi ) is computed
2/ The central value HX = H(X) is computed
3/ The differences (HX_plus_dXi-HX) are formed and divided by dX[i]
4/ Each result, component by component, becomes a column of the Jacobian
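In compact form, the i-th column of the Jacobian is therefore approximated
by (H(X+dX[i]*e_i) - H(X-dX[i]*e_i)) / (2*dX[i]) in the centered case, and
by (H(X+dX[i]*e_i) - H(X)) / dX[i] in the non-centered case, where e_i is
the i-th canonical basis vector.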
210 logging.debug("FDA Début du calcul de la Jacobienne")
211 logging.debug("FDA Incrément de............: %s*X"%float(self.__increment))
212 logging.debug("FDA Approximation centrée...: %s"%(self.__centeredDF))
214 if X is None or len(X)==0:
raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
217 _X = numpy.asmatrix(numpy.ravel( X )).T
219 if self.__dX is None:
220 _dX = self.__increment * _X
222 _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
224 if (_dX == 0.).any():
227 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
229 _dX = numpy.where( _dX == 0., moyenne, _dX )
231 __alreadyCalculated = False
233 __bidon, __alreadyCalculatedP = self.__doublon__(_X, self.__listJPCP, self.__listJPPN, None)
234 __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
235 if __alreadyCalculatedP == __alreadyCalculatedI > -1:
236 __alreadyCalculated, __i = True, __alreadyCalculatedP
237 logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
239 if __alreadyCalculated:
240 logging.debug("FDA Calcul Jacobienne (par récupération du doublon %i)"%__i)
241 _Jacobienne = self.__listJPCR[__i]
243 logging.debug("FDA Calcul Jacobienne (explicite)")
244 if self.__centeredDF:
246 if self.__mpEnabled and not self.__mfEnabled:
248 "__userFunction__path" : self.__userFunction__path,
249 "__userFunction__modl" : self.__userFunction__modl,
250 "__userFunction__name" : self.__userFunction__name,
253 for i in range( len(_dX) ):
255 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
256 _X_plus_dXi[i] = _X[i] + _dXi
257 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
258 _X_moins_dXi[i] = _X[i] - _dXi
260 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
261 _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
263 import multiprocessing
264 self.__pool = multiprocessing.Pool(self.__mpWorkers)
265 _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
270 for i in range( len(_dX) ):
271 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
273 elif self.__mfEnabled:
275 for i in range( len(_dX) ):
277 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
278 _X_plus_dXi[i] = _X[i] + _dXi
279 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
280 _X_moins_dXi[i] = _X[i] - _dXi
282 _xserie.append( _X_plus_dXi )
283 _xserie.append( _X_moins_dXi )
285 _HX_plusmoins_dX = self.DirectOperator( _xserie )
288 for i in range( len(_dX) ):
289 _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
293 for i in range( _dX.size ):
295 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
296 _X_plus_dXi[i] = _X[i] + _dXi
297 _X_moins_dXi = numpy.array( _X.A1, dtype=float )
298 _X_moins_dXi[i] = _X[i] - _dXi
300 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
301 _HX_moins_dXi = self.DirectOperator( _X_moins_dXi )
303 _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
307 if self.__mpEnabled and not self.__mfEnabled:
309 "__userFunction__path" : self.__userFunction__path,
310 "__userFunction__modl" : self.__userFunction__modl,
311 "__userFunction__name" : self.__userFunction__name,
314 _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
315 for i in range( len(_dX) ):
316 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
317 _X_plus_dXi[i] = _X[i] + _dX[i]
319 _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
321 import multiprocessing
322 self.__pool = multiprocessing.Pool(self.__mpWorkers)
323 _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
327 _HX = _HX_plus_dX.pop(0)
330 for i in range( len(_dX) ):
331 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
333 elif self.__mfEnabled:
335 _xserie.append( _X.A1 )
336 for i in range( len(_dX) ):
337 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
338 _X_plus_dXi[i] = _X[i] + _dX[i]
340 _xserie.append( _X_plus_dXi )
342 _HX_plus_dX = self.DirectOperator( _xserie )
344 _HX = _HX_plus_dX.pop(0)
347 for i in range( len(_dX) ):
348 _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
352 _HX = self.DirectOperator( _X )
353 for i in range( _dX.size ):
355 _X_plus_dXi = numpy.array( _X.A1, dtype=float )
356 _X_plus_dXi[i] = _X[i] + _dXi
358 _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
360 _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
363 _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
365 if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
366 while len(self.__listJPCP) > self.__lenghtRJ:
367 self.__listJPCP.pop(0)
368 self.__listJPCI.pop(0)
369 self.__listJPCR.pop(0)
370 self.__listJPPN.pop(0)
371 self.__listJPIN.pop(0)
372 self.__listJPCP.append( copy.copy(_X) )
373 self.__listJPCI.append( copy.copy(_dX) )
374 self.__listJPCR.append( copy.copy(_Jacobienne) )
375 self.__listJPPN.append( numpy.linalg.norm(_X) )
376 self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
378 logging.debug("FDA Fin du calcul de la Jacobienne")
382 # ---------------------------------------------------------
383 def TangentOperator(self, paire, **extraArgs ):
Computation of the tangent operator using the Jacobian.

NB: the extraArgs are present to keep the calling signature compatible,
but they must not be passed here to the user function.
assert len(paire) == 1, "Incorrect length of arguments"
393 assert len(_paire) == 2, "Incorrect number of arguments"
395 assert len(paire) == 2, "Incorrect number of arguments"
398 _Jacobienne = self.TangentMatrix( X )
399 if dX is None or len(dX) == 0:
# Computation of the matrix form if the second argument is None
402 # -------------------------------------------------------------
403 if self.__mfEnabled: return [_Jacobienne,]
404 else: return _Jacobienne
# Computation of the linearized value of H at X applied to dX
408 # ------------------------------------------------------
409 _dX = numpy.asmatrix(numpy.ravel( dX )).T
410 _HtX = numpy.dot(_Jacobienne, _dX)
411 if self.__mfEnabled: return [_HtX.A1,]
414 # ---------------------------------------------------------
415 def AdjointOperator(self, paire, **extraArgs ):
Computation of the adjoint operator using the Jacobian.

NB: the extraArgs are present to keep the calling signature compatible,
but they must not be passed here to the user function.
assert len(paire) == 1, "Incorrect length of arguments"
425 assert len(_paire) == 2, "Incorrect number of arguments"
427 assert len(paire) == 2, "Incorrect number of arguments"
430 _JacobienneT = self.TangentMatrix( X ).T
431 if Y is None or len(Y) == 0:
# Computation of the matrix form if the second argument is None
434 # -------------------------------------------------------------
435 if self.__mfEnabled: return [_JacobienneT,]
436 else: return _JacobienneT
# Computation of the value of the adjoint at X applied to Y
440 # --------------------------------------------------
441 _Y = numpy.asmatrix(numpy.ravel( Y )).T
442 _HaY = numpy.dot(_JacobienneT, _Y)
443 if self.__mfEnabled: return [_HaY.A1,]
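# Illustrative usage sketch (hedged: assumes a toy quadratic model and the
# default single-process, single-function settings; not part of the ADAO API):
#     def toyModel( x ):
#         return numpy.ravel( numpy.asarray(x, dtype=float) )**2
#     FDA = FDApproximation( Function = toyModel, centeredDF = True, increment = 0.01 )
#     X   = numpy.array([1., 2., 3.])
#     HX  = FDA.DirectOperator( X )          # H(X)
#     J   = FDA.TangentMatrix( X )           # finite-difference Jacobian at X
#     JdX = FDA.TangentOperator( (X, X) )    # J applied to the increment dX = X
#     JtY = FDA.AdjointOperator( (X, HX) )   # J.T applied to Y = H(X)
# Since the adjoint is the transpose of the tangent matrix, the duality product
# numpy.dot(numpy.ravel(JdX), numpy.ravel(HX)) coincides with
# numpy.dot(numpy.ravel(X), numpy.ravel(JtY)) up to rounding.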
446 # ==============================================================================
447 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
448 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
450 _bgcenter = numpy.ravel(_bgcenter)[:,None]
452 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
454 if _bgcovariance is None:
455 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
457 _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
458 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
460 return BackgroundEnsemble
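# Minimal usage sketch (illustrative only, not called by ADAO): draw 5 centered
# members around a 3-component state with an identity background covariance.
def _exampleCenteredPerturbations():
    "Hedged example for EnsembleOfCenteredPerturbations."
    Xb = numpy.array([1., 2., 3.])
    B  = numpy.identity(3)
    return EnsembleOfCenteredPerturbations( Xb, B, 5 )  # array of shape (3, 5)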
462 # ==============================================================================
463 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
464 "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
465 def __CenteredRandomAnomalies(Zr, N):
Generates a matrix of N random anomalies centered on Zr, following MB's
handwritten notes and consistent with PS's code, with eps = -1
471 Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
472 Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
473 R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
478 _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
480 raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
481 if _bgcovariance is None:
482 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
485 U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
486 _nbctl = _bgcenter.size
487 if _nbmembers > _nbctl:
488 _Z = numpy.concatenate((numpy.dot(
489 numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
490 numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
492 _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
493 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
494 BackgroundEnsemble = _bgcenter + _Zca
496 if max(abs(_bgcovariance.flatten())) > 0:
497 _nbctl = _bgcenter.size
498 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
499 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
500 BackgroundEnsemble = _bgcenter + _Zca
502 BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
504 return BackgroundEnsemble
506 # ==============================================================================
507 def EnsembleOfAnomalies( Ensemble, OptMean = None, Normalisation = 1.):
508 "Renvoie les anomalies centrées à partir d'un ensemble TailleEtat*NbMembres"
510 __Em = numpy.asarray(Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
512 __Em = numpy.ravel(OptMean).reshape((-1,1))
514 return Normalisation * (numpy.asarray(Ensemble) - __Em)
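# Minimal sketch (illustrative only, not called by ADAO): the anomalies returned
# by EnsembleOfAnomalies have zero mean over the members by construction.
def _exampleEnsembleAnomalies():
    "Hedged example: centered anomalies of a random StateSize*NbMembers ensemble."
    E = numpy.random.randn(3, 5)
    A = EnsembleOfAnomalies( E )
    return numpy.allclose( A.mean(axis=1), 0. )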
516 # ==============================================================================
517 def EnsembleErrorCovariance( Ensemble ):
518 "Renvoie la covariance d'ensemble"
519 __Anomalies = EnsembleOfAnomalies( Ensemble )
520 __n, __m = numpy.asarray(__Anomalies).shape
# Empirical estimate
522 __Covariance = (__Anomalies @ __Anomalies.T) / (__m-1)
524 __Covariance = (__Covariance + __Covariance.T) * 0.5
# Ensure positivity
526 __epsilon = mpr*numpy.trace(__Covariance)
527 __Covariance = __Covariance + __epsilon * numpy.identity(__n)
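# Illustrative check (hedged, not called by ADAO): up to the tiny diagonal
# regularization added above, this estimate matches the usual sample covariance,
# e.g. for E = numpy.random.randn(3, 5) one has
#     numpy.allclose( EnsembleErrorCovariance(E), numpy.cov(E), atol=1.e-12 )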
531 # ==============================================================================
532 def CovarianceInflation(
534 InflationType = None,
535 InflationFactor = None,
536 BackgroundCov = None,
Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa

Summary: Hunt 2007, section 2.3.5
543 if InflationFactor is None:
546 InflationFactor = float(InflationFactor)
548 if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
549 if InflationFactor < 1.:
raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
551 if InflationFactor < 1.+mpr:
553 OutputCovOrEns = InflationFactor**2 * InputCovOrEns
555 elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
556 if InflationFactor < 1.:
raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
558 if InflationFactor < 1.+mpr:
560 InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
561 OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
562 + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
564 elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
565 if InflationFactor < 0.:
raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
567 if InflationFactor < mpr:
569 __n, __m = numpy.asarray(InputCovOrEns).shape
raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
572 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
574 elif InflationType == "HybridOnBackgroundCovariance":
575 if InflationFactor < 0.:
raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
577 if InflationFactor < mpr:
579 __n, __m = numpy.asarray(InputCovOrEns).shape
raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
582 if BackgroundCov is None:
583 raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
584 if InputCovOrEns.shape != BackgroundCov.shape:
raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
586 OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
588 elif InflationType == "Relaxation":
589 raise NotImplementedError("InflationType Relaxation")
592 raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
594 return OutputCovOrEns
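# Minimal usage sketch (illustrative only, not called by ADAO): multiplicative
# inflation of a covariance by a factor alpha rescales it by alpha**2, as coded
# above.
def _exampleMultiplicativeInflation():
    "Hedged example for CovarianceInflation."
    P  = numpy.diag([1., 4.])
    Pi = CovarianceInflation( P, "MultiplicativeOnBackgroundCovariance", 1.5 )
    return numpy.allclose( Pi, 1.5**2 * P )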
596 # ==============================================================================
597 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
606 H = HO["Direct"].appliedControledFormTo
608 if selfA._parameters["EstimationOf"] == "State":
609 M = EM["Direct"].appliedControledFormTo
611 if CM is not None and "Tangent" in CM and U is not None:
612 Cm = CM["Tangent"].asMatrix(Xb)
# Precompute the inverses of B and R
619 LagL = selfA._parameters["SmootherLagL"]
620 if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
raise ValueError("Fixed-lag smoother requires a series of observations")
622 if Y.stepnumber() < LagL:
raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
624 duration = Y.stepnumber()
625 __p = numpy.cumprod(Y.shape())[-1]
627 __m = selfA._parameters["NumberOfMembers"]
629 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
631 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
633 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
634 selfA.StoredVariables["Analysis"].store( Xb )
635 if selfA._toStore("APosterioriCovariance"):
636 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
# Initial direct computation (storage is preferred over recomputation)
640 __seed = numpy.random.get_state()
641 selfB = copy.deepcopy(selfA)
642 selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
643 if VariantM == "EnKS16-KalmanFilterFormula":
644 etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
646 raise ValueError("VariantM has to be chosen in the authorized methods list.")
648 EL = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
650 EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
651 selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
653 for step in range(LagL,duration-1):
655 sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
658 if hasattr(Y,"store"):
659 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
661 Ynpu = numpy.ravel( Y ).reshape((__p,1))
664 if hasattr(U,"store") and len(U)>1:
665 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
666 elif hasattr(U,"store") and len(U)==1:
667 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
669 Un = numpy.asmatrix(numpy.ravel( U )).T
673 #--------------------------
674 if VariantM == "EnKS16-KalmanFilterFormula":
675 if selfA._parameters["EstimationOf"] == "State": # Forecast
676 EL = M( [(EL[:,i], Un) for i in range(__m)],
678 returnSerieAsArrayMatrix = True )
679 EL = EL + numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
680 EZ = H( [(EL[:,i], Un) for i in range(__m)],
682 returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it would be applied twice!
684 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
686 elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > By design, M = Id, Q = 0
688 EZ = H( [(EL[:,i], Un) for i in range(__m)],
690 returnSerieAsArrayMatrix = True )
692 vEm = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
693 vZm = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
695 mS = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
696 delta = RIdemi @ ( Ynpu - vZm )
697 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
698 vw = mT @ mS.T @ delta
700 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
701 mU = numpy.identity(__m)
702 wTU = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
704 EX = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
for irl in range(LagL): # Smoothing of the L previous analyses
709 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
710 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
711 sEL[irl] = vEm + EX @ wTU
# Keep the order-0 retrospective analysis before rotation
714 Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
715 if selfA._toStore("APosterioriCovariance"):
718 for irl in range(LagL):
719 sEL[irl] = sEL[irl+1]
721 #--------------------------
723 raise ValueError("VariantM has to be chosen in the authorized methods list.")
725 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
726 selfA.StoredVariables["Analysis"].store( Xa )
727 if selfA._toStore("APosterioriCovariance"):
728 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
# Store the last analyses, which are not yet fully updated
731 for irl in range(LagL):
732 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
733 Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
734 selfA.StoredVariables["Analysis"].store( Xa )
738 # ==============================================================================
739 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
741 Ensemble-Transform EnKF
743 if selfA._parameters["EstimationOf"] == "Parameters":
744 selfA._parameters["StoreInternalVariables"] = True
748 H = HO["Direct"].appliedControledFormTo
750 if selfA._parameters["EstimationOf"] == "State":
751 M = EM["Direct"].appliedControledFormTo
753 if CM is not None and "Tangent" in CM and U is not None:
754 Cm = CM["Tangent"].asMatrix(Xb)
# Number of steps equal to the number of observation steps
759 # -------------------------------------------------------
760 if hasattr(Y,"stepnumber"):
761 duration = Y.stepnumber()
762 __p = numpy.cumprod(Y.shape())[-1]
765 __p = numpy.array(Y).size
# Precompute the inverses of B and R
768 # ----------------------------------
769 if selfA._parameters["StoreInternalVariables"] \
770 or selfA._toStore("CostFunctionJ") \
771 or selfA._toStore("CostFunctionJb") \
772 or selfA._toStore("CostFunctionJo") \
773 or selfA._toStore("CurrentOptimum") \
774 or selfA._toStore("APosterioriCovariance"):
777 elif VariantM != "KalmanFilterFormula":
779 if VariantM == "KalmanFilterFormula":
785 __m = selfA._parameters["NumberOfMembers"]
786 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
788 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
790 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
791 #~ Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
793 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
794 selfA.StoredVariables["Analysis"].store( Xb )
795 if selfA._toStore("APosterioriCovariance"):
796 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
799 previousJMinimum = numpy.finfo(float).max
801 for step in range(duration-1):
802 if hasattr(Y,"store"):
803 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
805 Ynpu = numpy.ravel( Y ).reshape((__p,1))
808 if hasattr(U,"store") and len(U)>1:
809 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
810 elif hasattr(U,"store") and len(U)==1:
811 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
813 Un = numpy.asmatrix(numpy.ravel( U )).T
817 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
818 Xn = CovarianceInflation( Xn,
819 selfA._parameters["InflationType"],
820 selfA._parameters["InflationFactor"],
823 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
824 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
826 returnSerieAsArrayMatrix = True )
827 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
828 Xn_predicted = EMX + qi
829 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
831 returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it would be applied twice!
833 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
834 Xn_predicted = Xn_predicted + Cm * Un
835 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > By design, M = Id, Q = 0
838 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
840 returnSerieAsArrayMatrix = True )
842 # Mean of forecast and observation of forecast
843 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
844 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
847 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
848 EaHX = EnsembleOfAnomalies( HX_predicted, Hfm)
850 #--------------------------
851 if VariantM == "KalmanFilterFormula":
852 mS = RIdemi * EaHX / math.sqrt(__m-1)
853 delta = RIdemi * ( Ynpu - Hfm )
854 mT = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
855 vw = mT @ mS.T @ delta
857 Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
858 mU = numpy.identity(__m)
860 EaX = EaX / math.sqrt(__m-1)
861 Xn = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
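# Summary of the transform just applied (ETKF square-root update), with the
# variables defined above:
#   mS    = R^(-1/2) (HX anomalies) / sqrt(m-1)
#   delta = R^(-1/2) (Ynpu - Hfm)
#   mT    = ( I_m + mS^T mS )^(-1),   vw = mT mS^T delta
#   Xn    = Xfm + EaX ( vw + sqrt(m-1) mT^(1/2) )
# i.e. the mean is shifted by the weights vw and the anomalies are rescaled by
# the square root of the weight-space analysis covariance mT.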
862 #--------------------------
863 elif VariantM == "Variational":
HXfm = H((Xfm[:,None], Un)) # Possibly use Hfm instead
866 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
867 _Jo = 0.5 * _A.T @ (RI * _A)
868 _Jb = 0.5 * (__m-1) * w.T @ w
871 def GradientOfCostFunction(w):
872 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
873 _GardJo = - EaHX.T @ (RI * _A)
874 _GradJb = (__m-1) * w.reshape((__m,1))
875 _GradJ = _GardJo + _GradJb
876 return numpy.ravel(_GradJ)
877 vw = scipy.optimize.fmin_cg(
879 x0 = numpy.zeros(__m),
880 fprime = GradientOfCostFunction,
885 Hto = EaHX.T @ (RI * EaHX)
886 Htb = (__m-1) * numpy.identity(__m)
889 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
892 Xn = Xfm + EaX @ (vw[:,None] + EWa)
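# The minimization above works in the ensemble weight space w (size __m):
#   J(w) = 0.5 (d - EaHX w)^T R^(-1) (d - EaHX w) + 0.5 (__m-1) w^T w
# with d = Ynpu - HXfm, and the analysis anomalies are then rescaled by
# EWa = sqrtm( (__m-1) (Hto + Htb)^(-1) ), a square root of the inverse
# Hessian of J at its minimum.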
893 #--------------------------
elif VariantM == "FiniteSize11": # Boc2011 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly use Hfm instead
897 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
898 _Jo = 0.5 * _A.T @ (RI * _A)
899 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
902 def GradientOfCostFunction(w):
903 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
904 _GardJo = - EaHX.T @ (RI * _A)
905 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
906 _GradJ = _GardJo + _GradJb
907 return numpy.ravel(_GradJ)
908 vw = scipy.optimize.fmin_cg(
910 x0 = numpy.zeros(__m),
911 fprime = GradientOfCostFunction,
916 Hto = EaHX.T @ (RI * EaHX)
918 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
919 / (1 + 1/__m + vw.T @ vw)**2
922 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
925 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
926 #--------------------------
elif VariantM == "FiniteSize15": # Boc2015 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly use Hfm instead
930 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
931 _Jo = 0.5 * _A.T * RI * _A
932 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
935 def GradientOfCostFunction(w):
936 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
937 _GardJo = - EaHX.T @ (RI * _A)
938 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
939 _GradJ = _GardJo + _GradJb
940 return numpy.ravel(_GradJ)
941 vw = scipy.optimize.fmin_cg(
943 x0 = numpy.zeros(__m),
944 fprime = GradientOfCostFunction,
949 Hto = EaHX.T @ (RI * EaHX)
951 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
952 / (1 + 1/__m + vw.T @ vw)**2
955 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
958 Xn = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
959 #--------------------------
elif VariantM == "FiniteSize16": # Boc2016 gauge
HXfm = H((Xfm[:,None], Un)) # Possibly use Hfm instead
963 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
964 _Jo = 0.5 * _A.T @ (RI * _A)
965 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
968 def GradientOfCostFunction(w):
969 _A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
970 _GardJo = - EaHX.T @ (RI * _A)
971 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
972 _GradJ = _GardJo + _GradJb
973 return numpy.ravel(_GradJ)
974 vw = scipy.optimize.fmin_cg(
976 x0 = numpy.zeros(__m),
977 fprime = GradientOfCostFunction,
982 Hto = EaHX.T @ (RI * EaHX)
983 Htb = ((__m+1) / (__m-1)) * \
984 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
985 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
988 Pta = numpy.linalg.inv( Hta )
EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
991 Xn = Xfm + EaX @ (vw[:,None] + EWa)
992 #--------------------------
994 raise ValueError("VariantM has to be chosen in the authorized methods list.")
996 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
997 Xn = CovarianceInflation( Xn,
998 selfA._parameters["InflationType"],
999 selfA._parameters["InflationFactor"],
1002 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1003 #--------------------------
1005 if selfA._parameters["StoreInternalVariables"] \
1006 or selfA._toStore("CostFunctionJ") \
1007 or selfA._toStore("CostFunctionJb") \
1008 or selfA._toStore("CostFunctionJo") \
1009 or selfA._toStore("APosterioriCovariance") \
1010 or selfA._toStore("InnovationAtCurrentAnalysis") \
1011 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1012 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1013 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1014 _Innovation = Ynpu - _HXa
1016 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with the analysis
1018 selfA.StoredVariables["Analysis"].store( Xa )
1019 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1020 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1021 if selfA._toStore("InnovationAtCurrentAnalysis"):
1022 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with the current state
1024 if selfA._parameters["StoreInternalVariables"] \
1025 or selfA._toStore("CurrentState"):
1026 selfA.StoredVariables["CurrentState"].store( Xn )
1027 if selfA._toStore("ForecastState"):
1028 selfA.StoredVariables["ForecastState"].store( EMX )
1029 if selfA._toStore("BMA"):
1030 selfA.StoredVariables["BMA"].store( EMX - Xa.reshape((__n,1)) )
1031 if selfA._toStore("InnovationAtCurrentState"):
1032 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1033 if selfA._toStore("SimulatedObservationAtCurrentState") \
1034 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1035 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1037 if selfA._parameters["StoreInternalVariables"] \
1038 or selfA._toStore("CostFunctionJ") \
1039 or selfA._toStore("CostFunctionJb") \
1040 or selfA._toStore("CostFunctionJo") \
1041 or selfA._toStore("CurrentOptimum") \
1042 or selfA._toStore("APosterioriCovariance"):
1043 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1044 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1046 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1047 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1048 selfA.StoredVariables["CostFunctionJ" ].store( J )
1050 if selfA._toStore("IndexOfOptimum") \
1051 or selfA._toStore("CurrentOptimum") \
1052 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1053 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1054 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1055 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1056 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1057 if selfA._toStore("IndexOfOptimum"):
1058 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1059 if selfA._toStore("CurrentOptimum"):
1060 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1061 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1062 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1063 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1064 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1065 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1066 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1067 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1068 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1069 if selfA._toStore("APosterioriCovariance"):
1070 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1071 if selfA._parameters["EstimationOf"] == "Parameters" \
1072 and J < previousJMinimum:
1073 previousJMinimum = J
1075 if selfA._toStore("APosterioriCovariance"):
1076 covarianceXaMin = Pn
# ---> For the smoothers
1078 if selfA._toStore("CurrentEnsembleState"):
1079 selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
# Additional final storage of the optimum for parameter estimation
1082 # ----------------------------------------------------------------------
1083 if selfA._parameters["EstimationOf"] == "Parameters":
1084 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1085 selfA.StoredVariables["Analysis"].store( XaMin )
1086 if selfA._toStore("APosterioriCovariance"):
1087 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1088 if selfA._toStore("BMA"):
1089 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1093 # ==============================================================================
1094 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1095 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1099 if selfA._parameters["EstimationOf"] == "Parameters":
1100 selfA._parameters["StoreInternalVariables"] = True
1104 H = HO["Direct"].appliedControledFormTo
1106 if selfA._parameters["EstimationOf"] == "State":
1107 M = EM["Direct"].appliedControledFormTo
1109 if CM is not None and "Tangent" in CM and U is not None:
1110 Cm = CM["Tangent"].asMatrix(Xb)
# Number of steps equal to the number of observation steps
1115 # -------------------------------------------------------
1116 if hasattr(Y,"stepnumber"):
1117 duration = Y.stepnumber()
1118 __p = numpy.cumprod(Y.shape())[-1]
1121 __p = numpy.array(Y).size
# Precompute the inverses of B and R
1124 # ----------------------------------
1125 if selfA._parameters["StoreInternalVariables"] \
1126 or selfA._toStore("CostFunctionJ") \
1127 or selfA._toStore("CostFunctionJb") \
1128 or selfA._toStore("CostFunctionJo") \
1129 or selfA._toStore("CurrentOptimum") \
1130 or selfA._toStore("APosterioriCovariance"):
1137 __m = selfA._parameters["NumberOfMembers"]
1138 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1140 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1142 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1144 Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
1146 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1147 selfA.StoredVariables["Analysis"].store( Xb )
1148 if selfA._toStore("APosterioriCovariance"):
1149 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1152 previousJMinimum = numpy.finfo(float).max
1154 for step in range(duration-1):
1155 if hasattr(Y,"store"):
1156 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1158 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1161 if hasattr(U,"store") and len(U)>1:
1162 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1163 elif hasattr(U,"store") and len(U)==1:
1164 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1166 Un = numpy.asmatrix(numpy.ravel( U )).T
1170 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1171 Xn = CovarianceInflation( Xn,
1172 selfA._parameters["InflationType"],
1173 selfA._parameters["InflationFactor"],
1176 #--------------------------
1177 if VariantM == "IEnKF12":
1178 Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
1179 EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
1183 Ta = numpy.identity(__m)
1184 vw = numpy.zeros(__m)
1185 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1186 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1189 E1 = vx1 + _epsilon * EaX
1191 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1193 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
1194 E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1196 returnSerieAsArrayMatrix = True )
1197 elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > By design, M = Id
1200 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1201 vy1 = H((vx2, Un)).reshape((__p,1))
1203 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
1205 returnSerieAsArrayMatrix = True )
1206 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1209 EaY = (HE2 - vy2) / _epsilon
1211 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1213 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
1214 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1215 Deltaw = - numpy.linalg.solve(mH,GradJ)
1220 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1224 A2 = EnsembleOfAnomalies( E2 )
1227 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1228 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
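# The loop above performs Gauss-Newton iterations in the weight space w:
#   grad J(w) = w - EaY^T R^(-1) ( Ynpu - vy1 )
#   approximate Hessian mH = I_m + EaY^T R^(-1) EaY
#   Deltaw = - mH^(-1) grad J(w)
# where EaY are the observation anomalies, linearized either with the bundle
# scaling (_epsilon) or with the transform Ta, depending on BnotT.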
1231 #--------------------------
1233 raise ValueError("VariantM has to be chosen in the authorized methods list.")
1235 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1236 Xn = CovarianceInflation( Xn,
1237 selfA._parameters["InflationType"],
1238 selfA._parameters["InflationFactor"],
1241 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1242 #--------------------------
1244 if selfA._parameters["StoreInternalVariables"] \
1245 or selfA._toStore("CostFunctionJ") \
1246 or selfA._toStore("CostFunctionJb") \
1247 or selfA._toStore("CostFunctionJo") \
1248 or selfA._toStore("APosterioriCovariance") \
1249 or selfA._toStore("InnovationAtCurrentAnalysis") \
1250 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1251 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1252 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1253 _Innovation = Ynpu - _HXa
1255 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
# ---> with the analysis
1257 selfA.StoredVariables["Analysis"].store( Xa )
1258 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1259 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1260 if selfA._toStore("InnovationAtCurrentAnalysis"):
1261 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
# ---> with the current state
1263 if selfA._parameters["StoreInternalVariables"] \
1264 or selfA._toStore("CurrentState"):
1265 selfA.StoredVariables["CurrentState"].store( Xn )
1266 if selfA._toStore("ForecastState"):
1267 selfA.StoredVariables["ForecastState"].store( E2 )
1268 if selfA._toStore("BMA"):
1269 selfA.StoredVariables["BMA"].store( E2 - Xa )
1270 if selfA._toStore("InnovationAtCurrentState"):
1271 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1272 if selfA._toStore("SimulatedObservationAtCurrentState") \
1273 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1274 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1276 if selfA._parameters["StoreInternalVariables"] \
1277 or selfA._toStore("CostFunctionJ") \
1278 or selfA._toStore("CostFunctionJb") \
1279 or selfA._toStore("CostFunctionJo") \
1280 or selfA._toStore("CurrentOptimum") \
1281 or selfA._toStore("APosterioriCovariance"):
1282 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1283 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1285 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1286 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1287 selfA.StoredVariables["CostFunctionJ" ].store( J )
1289 if selfA._toStore("IndexOfOptimum") \
1290 or selfA._toStore("CurrentOptimum") \
1291 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1292 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1293 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1294 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1295 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1296 if selfA._toStore("IndexOfOptimum"):
1297 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1298 if selfA._toStore("CurrentOptimum"):
1299 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1300 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1301 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1302 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1303 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1304 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1305 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1306 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1307 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1308 if selfA._toStore("APosterioriCovariance"):
1309 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1310 if selfA._parameters["EstimationOf"] == "Parameters" \
1311 and J < previousJMinimum:
1312 previousJMinimum = J
1314 if selfA._toStore("APosterioriCovariance"):
1315 covarianceXaMin = Pn
# Additional final storage of the optimum for parameter estimation
1318 # ----------------------------------------------------------------------
1319 if selfA._parameters["EstimationOf"] == "Parameters":
1320 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1321 selfA.StoredVariables["Analysis"].store( XaMin )
1322 if selfA._toStore("APosterioriCovariance"):
1323 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1324 if selfA._toStore("BMA"):
1325 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1329 # ==============================================================================
1330 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
# Non-linear operator for the outer loop
1339 Hm = HO["Direct"].appliedTo
# Precompute the inverses of B and R
# Starting point of the optimization
1346 Xini = selfA._parameters["InitializationPoint"]
1348 HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
1349 Innovation = Y - HXb
1356 Xr = Xini.reshape((-1,1))
1357 while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
1361 Ht = HO["Tangent"].asMatrix(Xr)
1362 Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
# Definition of the cost function
1365 # ------------------------------
1366 def CostFunction(dx):
1367 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1368 if selfA._parameters["StoreInternalVariables"] or \
1369 selfA._toStore("CurrentState") or \
1370 selfA._toStore("CurrentOptimum"):
1371 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
1373 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1374 _dInnovation = Innovation - _HdX
1375 if selfA._toStore("SimulatedObservationAtCurrentState") or \
1376 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1377 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
1378 if selfA._toStore("InnovationAtCurrentState"):
1379 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
1381 Jb = float( 0.5 * _dX.T * BI * _dX )
1382 Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
1385 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
1386 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1387 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1388 selfA.StoredVariables["CostFunctionJ" ].store( J )
1389 if selfA._toStore("IndexOfOptimum") or \
1390 selfA._toStore("CurrentOptimum") or \
1391 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
1392 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
1393 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
1394 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1395 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1396 if selfA._toStore("IndexOfOptimum"):
1397 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1398 if selfA._toStore("CurrentOptimum"):
1399 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
1400 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1401 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
1402 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1403 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1404 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1405 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1406 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1407 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1410 def GradientOfCostFunction(dx):
1411 _dX = numpy.asmatrix(numpy.ravel( dx )).T
1413 _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
1414 _dInnovation = Innovation - _HdX
1416 GradJo = - Ht.T @ (RI * _dInnovation)
1417 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
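# Together, CostFunction and GradientOfCostFunction above implement the
# incremental (linearized) 3DVAR functional in the increment dx:
#   J(dx)      = 0.5 dx^T B^(-1) dx + 0.5 (d - Ht dx)^T R^(-1) (d - Ht dx)
#   grad J(dx) = B^(-1) dx - Ht^T R^(-1) (d - Ht dx)
# with d = Y - Hm(Xb) the outer-loop innovation and Ht the tangent of the
# observation operator at the current outer iterate Xr.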
# Minimization of the functional
1421 # --------------------------------
1422 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
1424 if selfA._parameters["Minimizer"] == "LBFGSB":
1425 # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
1426 if "0.19" <= scipy.version.version <= "1.1.0":
1427 import lbfgsbhlt as optimiseur
1429 import scipy.optimize as optimiseur
1430 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
1431 func = CostFunction,
1432 x0 = numpy.zeros(Xini.size),
1433 fprime = GradientOfCostFunction,
1435 bounds = selfA._parameters["Bounds"],
1436 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
1437 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
1438 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1439 iprint = selfA._parameters["optiprint"],
1441 nfeval = Informations['funcalls']
1442 rc = Informations['warnflag']
1443 elif selfA._parameters["Minimizer"] == "TNC":
1444 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
1445 func = CostFunction,
1446 x0 = numpy.zeros(Xini.size),
1447 fprime = GradientOfCostFunction,
1449 bounds = selfA._parameters["Bounds"],
1450 maxfun = selfA._parameters["MaximumNumberOfSteps"],
1451 pgtol = selfA._parameters["ProjectedGradientTolerance"],
1452 ftol = selfA._parameters["CostDecrementTolerance"],
1453 messages = selfA._parameters["optmessages"],
1455 elif selfA._parameters["Minimizer"] == "CG":
1456 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
1458 x0 = numpy.zeros(Xini.size),
1459 fprime = GradientOfCostFunction,
1461 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1462 gtol = selfA._parameters["GradientNormTolerance"],
1463 disp = selfA._parameters["optdisp"],
1466 elif selfA._parameters["Minimizer"] == "NCG":
1467 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
1469 x0 = numpy.zeros(Xini.size),
1470 fprime = GradientOfCostFunction,
1472 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1473 avextol = selfA._parameters["CostDecrementTolerance"],
1474 disp = selfA._parameters["optdisp"],
1477 elif selfA._parameters["Minimizer"] == "BFGS":
1478 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
1480 x0 = numpy.zeros(Xini.size),
1481 fprime = GradientOfCostFunction,
1483 maxiter = selfA._parameters["MaximumNumberOfSteps"],
1484 gtol = selfA._parameters["GradientNormTolerance"],
1485 disp = selfA._parameters["optdisp"],
1489 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
1491 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1492 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
1494 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
1495 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
1496 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
1498 Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
1501 DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
1502 iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
# Retrieve the analysis
1505 # ----------------------
1508 selfA.StoredVariables["Analysis"].store( Xa )
1510 if selfA._toStore("OMA") or \
1511 selfA._toStore("SigmaObs2") or \
1512 selfA._toStore("SimulationQuantiles") or \
1513 selfA._toStore("SimulatedObservationAtOptimum"):
1514 if selfA._toStore("SimulatedObservationAtCurrentState"):
1515 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
1516 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1517 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
# Compute the analysis covariance
1522 # ---------------------------------
1523 if selfA._toStore("APosterioriCovariance") or \
1524 selfA._toStore("SimulationQuantiles") or \
1525 selfA._toStore("JacobianMatrixAtOptimum") or \
1526 selfA._toStore("KalmanGainAtOptimum"):
1527 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
1528 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
1529 if selfA._toStore("APosterioriCovariance") or \
1530 selfA._toStore("SimulationQuantiles") or \
1531 selfA._toStore("KalmanGainAtOptimum"):
1532 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
1533 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
1534 if selfA._toStore("APosterioriCovariance") or \
1535 selfA._toStore("SimulationQuantiles"):
1539 _ee = numpy.matrix(numpy.zeros(nb)).T
1541 _HtEE = numpy.dot(HtM,_ee)
1542 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
1543 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
1544 HessienneI = numpy.matrix( HessienneI )
1546 if min(A.shape) != max(A.shape):
raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
1548 if (numpy.diag(A) < 0).any():
1549 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
1552 L = numpy.linalg.cholesky( A )
1554 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
1555 if selfA._toStore("APosterioriCovariance"):
1556 selfA.StoredVariables["APosterioriCovariance"].store( A )
1557 if selfA._toStore("JacobianMatrixAtOptimum"):
1558 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
1559 if selfA._toStore("KalmanGainAtOptimum"):
1560 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
1561 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
1562 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
1564 # Additional computations and/or storage
1565 # ---------------------------------------
1566 if selfA._toStore("Innovation") or \
1567 selfA._toStore("SigmaObs2") or \
1568 selfA._toStore("MahalanobisConsistency") or \
1569 selfA._toStore("OMB"):
1571 if selfA._toStore("Innovation"):
1572 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
1573 if selfA._toStore("BMA"):
1574 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
1575 if selfA._toStore("OMA"):
1576 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
1577 if selfA._toStore("OMB"):
1578 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
1579 if selfA._toStore("SigmaObs2"):
1580 TraceR = R.trace(Y.size)
1581 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
1582 if selfA._toStore("MahalanobisConsistency"):
1583 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
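# Empirical quantiles of the simulated observations: nech states are drawn from
# N(Xa, A) and mapped to observation space, either through the tangent linear
# operator HtM ("Linear") or through the full operator Hm ("NonLinear"); the
# requested quantiles are then extracted by index from the sample YfQ.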
1584 if selfA._toStore("SimulationQuantiles"):
1585 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
1586 HXa = numpy.matrix(numpy.ravel( HXa )).T
1588 for i in range(nech):
1589 if selfA._parameters["SimulationForQuantiles"] == "Linear":
1590 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
1591 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
1593 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
1594 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
1595 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
1599 YfQ = numpy.hstack((YfQ,Yr))
1602 for quantile in selfA._parameters["Quantiles"]:
1603 if not (0. <= float(quantile) <= 1.): continue
1604 indice = int(nech * float(quantile) - 1./nech)
1605 if YQ is None: YQ = YfQ[:,indice]
1606 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
1607 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
1608 if selfA._toStore("SimulatedObservationAtBackground"):
1609 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
1610 if selfA._toStore("SimulatedObservationAtOptimum"):
1611 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
1615 # ==============================================================================
1616 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
1617 BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1619 Maximum Likelihood Ensemble Filter
1621 if selfA._parameters["EstimationOf"] == "Parameters":
1622 selfA._parameters["StoreInternalVariables"] = True
1626 H = HO["Direct"].appliedControledFormTo
1628 if selfA._parameters["EstimationOf"] == "State":
1629 M = EM["Direct"].appliedControledFormTo
1631 if CM is not None and "Tangent" in CM and U is not None:
1632 Cm = CM["Tangent"].asMatrix(Xb)
1636 # Number of steps identical to the number of observation steps
1637 # -------------------------------------------------------
1638 if hasattr(Y,"stepnumber"):
1639 duration = Y.stepnumber()
1640 __p = numpy.cumprod(Y.shape())[-1]
1643 __p = numpy.array(Y).size
1645 # Pre-computation of the inversions of B and R
1646 # ----------------------------------
1647 if selfA._parameters["StoreInternalVariables"] \
1648 or selfA._toStore("CostFunctionJ") \
1649 or selfA._toStore("CostFunctionJb") \
1650 or selfA._toStore("CostFunctionJo") \
1651 or selfA._toStore("CurrentOptimum") \
1652 or selfA._toStore("APosterioriCovariance"):
1659 __m = selfA._parameters["NumberOfMembers"]
1660 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
1662 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
1664 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
1666 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1668 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1669 selfA.StoredVariables["Analysis"].store( Xb )
1670 if selfA._toStore("APosterioriCovariance"):
1671 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1674 previousJMinimum = numpy.finfo(float).max
1676 for step in range(duration-1):
1677 if hasattr(Y,"store"):
1678 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1680 Ynpu = numpy.ravel( Y ).reshape((__p,1))
1683 if hasattr(U,"store") and len(U)>1:
1684 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
1685 elif hasattr(U,"store") and len(U)==1:
1686 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
1688 Un = numpy.asmatrix(numpy.ravel( U )).T
1692 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1693 Xn = CovarianceInflation( Xn,
1694 selfA._parameters["InflationType"],
1695 selfA._parameters["InflationFactor"],
1698 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1699 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1701 returnSerieAsArrayMatrix = True )
1702 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
1703 Xn_predicted = EMX + qi
1704 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, there is a duplication!
1705 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1706 Xn_predicted = Xn_predicted + Cm * Un
1707 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1708 # --- > By principle, M = Id, Q = 0
1711 #--------------------------
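# MLEF13 variant: iterative analysis in the ensemble weight space. Each pass
# rebuilds the observed anomalies EaY around the current state vx1, forms the
# approximate Hessian mH = I + EaY^T R^-1 EaY and the gradient GradJ, solves
# mH . Deltaw = -GradJ for the weight increment, and updates the transform
# matrix Ta = mH^(-1/2); the analysis ensemble is finally recovered as
# vx1 + sqrt(m-1) * EaX @ Ta @ Ua.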
1712 if VariantM == "MLEF13":
1713 Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
1714 EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
1715 Ua = numpy.identity(__m)
1719 Ta = numpy.identity(__m)
1720 vw = numpy.zeros(__m)
1721 while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
1722 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
1725 E1 = vx1 + _epsilon * EaX
1727 E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
1729 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
1731 returnSerieAsArrayMatrix = True )
1732 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1735 EaY = (HE2 - vy2) / _epsilon
1737 EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
1739 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
1740 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY)
1741 Deltaw = - numpy.linalg.solve(mH,GradJ)
1746 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1751 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
1753 Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
1754 #--------------------------
1756 raise ValueError("VariantM has to be chosen from the authorized methods list.")
1758 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1759 Xn = CovarianceInflation( Xn,
1760 selfA._parameters["InflationType"],
1761 selfA._parameters["InflationFactor"],
1764 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1765 #--------------------------
1767 if selfA._parameters["StoreInternalVariables"] \
1768 or selfA._toStore("CostFunctionJ") \
1769 or selfA._toStore("CostFunctionJb") \
1770 or selfA._toStore("CostFunctionJo") \
1771 or selfA._toStore("APosterioriCovariance") \
1772 or selfA._toStore("InnovationAtCurrentAnalysis") \
1773 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1774 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1775 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
1776 _Innovation = Ynpu - _HXa
1778 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1779 # ---> with analysis
1780 selfA.StoredVariables["Analysis"].store( Xa )
1781 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1782 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1783 if selfA._toStore("InnovationAtCurrentAnalysis"):
1784 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1785 # ---> with current state
1786 if selfA._parameters["StoreInternalVariables"] \
1787 or selfA._toStore("CurrentState"):
1788 selfA.StoredVariables["CurrentState"].store( Xn )
1789 if selfA._toStore("ForecastState"):
1790 selfA.StoredVariables["ForecastState"].store( EMX )
1791 if selfA._toStore("BMA"):
1792 selfA.StoredVariables["BMA"].store( EMX - Xa )
1793 if selfA._toStore("InnovationAtCurrentState"):
1794 selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
1795 if selfA._toStore("SimulatedObservationAtCurrentState") \
1796 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1797 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
1799 if selfA._parameters["StoreInternalVariables"] \
1800 or selfA._toStore("CostFunctionJ") \
1801 or selfA._toStore("CostFunctionJb") \
1802 or selfA._toStore("CostFunctionJo") \
1803 or selfA._toStore("CurrentOptimum") \
1804 or selfA._toStore("APosterioriCovariance"):
1805 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
1806 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
1808 selfA.StoredVariables["CostFunctionJb"].store( Jb )
1809 selfA.StoredVariables["CostFunctionJo"].store( Jo )
1810 selfA.StoredVariables["CostFunctionJ" ].store( J )
1812 if selfA._toStore("IndexOfOptimum") \
1813 or selfA._toStore("CurrentOptimum") \
1814 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1815 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1816 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1817 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1818 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1819 if selfA._toStore("IndexOfOptimum"):
1820 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1821 if selfA._toStore("CurrentOptimum"):
1822 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1823 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1824 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1825 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1826 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1827 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1828 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1829 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1830 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1831 if selfA._toStore("APosterioriCovariance"):
1832 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1833 if selfA._parameters["EstimationOf"] == "Parameters" \
1834 and J < previousJMinimum:
1835 previousJMinimum = J
1837 if selfA._toStore("APosterioriCovariance"):
1838 covarianceXaMin = Pn
1840 # Final additional storage of the optimum in parameter estimation
1841 # ----------------------------------------------------------------------
1842 if selfA._parameters["EstimationOf"] == "Parameters":
1843 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1844 selfA.StoredVariables["Analysis"].store( XaMin )
1845 if selfA._toStore("APosterioriCovariance"):
1846 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1847 if selfA._toStore("BMA"):
1848 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1852 # ==============================================================================
1864 Computer implementation of the MMQR algorithm, based on the publication:
1865 David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
1866 Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
1869 # Retrieval of the initial data and information
1870 # --------------------------------------------------
1871 variables = numpy.ravel( x0 )
1872 mesures = numpy.ravel( y )
1873 increment = sys.float_info[0]
1876 quantile = float(quantile)
1878 # Computation of the MM parameters
1879 # ---------------------------
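# epsilon is the smoothing parameter of the perturbed quantile objective: it
# keeps the weights 1/(epsilon+|residual|) used below finite when a residual
# vanishes; tn and e0 are derived from the requested tolerance, consistently
# with the MM construction described in the reference cited above.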
1880 tn = float(toler) / n
1881 e0 = -tn / math.log(tn)
1882 epsilon = (e0-tn)/(1+math.log(e0))
1884 # Initialization computations
1885 # ------------------------
1886 residus = mesures - numpy.ravel( func( variables ) )
1887 poids = 1./(epsilon+numpy.abs(residus))
1888 veps = 1. - 2. * quantile - residus * poids
1889 lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
1892 # Iterative search
1893 # -------------------
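# Each MM iteration minimizes a locally quadratic surrogate of the quantile
# loss: a weighted least-squares system built from the Jacobian (Derivees) and
# the current weights is solved for the step, which is then adjusted so as to
# stay within the bounds and to keep the surrogate decreasing.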
1894 while (increment > toler) and (iteration < maxfun) :
1897 Derivees = numpy.array(fprime(variables))
1898 Derivees = Derivees.reshape(n,p) # Necessary to restore the matrix shape if it went through YACS pipes
1899 DeriveesT = Derivees.transpose()
1900 M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
1901 SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
1902 step = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
1904 variables = variables + step
1905 if bounds is not None:
1906 # Warning: avoid an infinite loop if an interval is too small
1907 while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
1909 variables = variables - step
1910 residus = mesures - numpy.ravel( func(variables) )
1911 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1913 while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
1915 variables = variables - step
1916 residus = mesures - numpy.ravel( func(variables) )
1917 surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
1919 increment = lastsurrogate-surrogate
1920 poids = 1./(epsilon+numpy.abs(residus))
1921 veps = 1. - 2. * quantile - residus * poids
1922 lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
1926 Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
1928 return variables, Ecart, [n,p,iteration,increment,0]
1930 # ==============================================================================
1931 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
1933 Multi-step and multi-method 3DVAR
1938 Xn = numpy.ravel(Xb).reshape((-1,1))
1940 if selfA._parameters["EstimationOf"] == "State":
1941 M = EM["Direct"].appliedTo
1943 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1944 selfA.StoredVariables["Analysis"].store( Xn )
1945 if selfA._toStore("APosterioriCovariance"):
1946 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(Xn.size)
1948 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1949 if selfA._toStore("ForecastState"):
1950 selfA.StoredVariables["ForecastState"].store( Xn )
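# At each observation step below, the latest analysis is propagated by the
# evolution model M (state estimation) or kept unchanged (parameter estimation),
# and the resulting background is assimilated by the single-cycle algorithm
# received as oneCycle.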
1952 if hasattr(Y,"stepnumber"):
1953 duration = Y.stepnumber()
1959 for step in range(duration-1):
1960 if hasattr(Y,"store"):
1961 Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
1963 Ynpu = numpy.ravel( Y ).reshape((-1,1))
1965 if selfA._parameters["EstimationOf"] == "State": # Forecast
1966 Xn = selfA.StoredVariables["Analysis"][-1]
1967 Xn_predicted = M( Xn )
1968 if selfA._toStore("ForecastState"):
1969 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1970 elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
1971 # --- > By principle, M = Id, Q = 0
1973 Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
1975 oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
1979 # ==============================================================================
1980 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1989 Hm = HO["Direct"].appliedTo
1991 # Possible use of a precomputed H(Xb) vector
1992 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
1993 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
1996 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
1997 if Y.size != HXb.size:
1998 raise ValueError("The size %i of observations Y and the size %i of the observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
1999 if max(Y.shape) != max(HXb.shape):
2000 raise ValueError("The shape %s of observations Y and the shape %s of the observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2002 if selfA._toStore("JacobianMatrixAtBackground"):
2003 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2004 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2005 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2007 Ht = HO["Tangent"].asMatrix(Xb)
2009 HBHTpR = R + Ht * BHT
2010 Innovation = Y - HXb
2012 # Starting point of the optimization
2013 Xini = numpy.zeros(Xb.shape)
2015 # Definition of the cost function
2016 # ------------------------------
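# PSAS dual formulation: the minimization is carried out in observation space on
# the variable w, with J(w) = 0.5 * w^T (H B H^T + R) w - w^T d where d = Y - H(Xb);
# the physical state is recovered as Xb + BHT * w, the BHT matrix holding the
# product of B with the transposed tangent operator.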
2017 def CostFunction(w):
2018 _W = numpy.asmatrix(numpy.ravel( w )).T
2019 if selfA._parameters["StoreInternalVariables"] or \
2020 selfA._toStore("CurrentState") or \
2021 selfA._toStore("CurrentOptimum"):
2022 selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
2023 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2024 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2025 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
2026 if selfA._toStore("InnovationAtCurrentState"):
2027 selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2029 Jb = float( 0.5 * _W.T * HBHTpR * _W )
2030 Jo = float( - _W.T * Innovation )
2033 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2034 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2035 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2036 selfA.StoredVariables["CostFunctionJ" ].store( J )
2037 if selfA._toStore("IndexOfOptimum") or \
2038 selfA._toStore("CurrentOptimum") or \
2039 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2040 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2041 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2042 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2043 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2044 if selfA._toStore("IndexOfOptimum"):
2045 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2046 if selfA._toStore("CurrentOptimum"):
2047 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2048 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2049 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2050 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2051 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2052 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2053 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2054 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2055 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2058 def GradientOfCostFunction(w):
2059 _W = numpy.asmatrix(numpy.ravel( w )).T
2060 GradJb = HBHTpR * _W
2061 GradJo = - Innovation
2062 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2065 # Minimization of the functional
2066 # --------------------------------
2067 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2069 if selfA._parameters["Minimizer"] == "LBFGSB":
2070 if "0.19" <= scipy.version.version <= "1.1.0":
2071 import lbfgsbhlt as optimiseur
2073 import scipy.optimize as optimiseur
2074 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2075 func = CostFunction,
2077 fprime = GradientOfCostFunction,
2079 bounds = selfA._parameters["Bounds"],
2080 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2081 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2082 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2083 iprint = selfA._parameters["optiprint"],
2085 nfeval = Informations['funcalls']
2086 rc = Informations['warnflag']
2087 elif selfA._parameters["Minimizer"] == "TNC":
2088 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2089 func = CostFunction,
2091 fprime = GradientOfCostFunction,
2093 bounds = selfA._parameters["Bounds"],
2094 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2095 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2096 ftol = selfA._parameters["CostDecrementTolerance"],
2097 messages = selfA._parameters["optmessages"],
2099 elif selfA._parameters["Minimizer"] == "CG":
2100 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2103 fprime = GradientOfCostFunction,
2105 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2106 gtol = selfA._parameters["GradientNormTolerance"],
2107 disp = selfA._parameters["optdisp"],
2110 elif selfA._parameters["Minimizer"] == "NCG":
2111 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2114 fprime = GradientOfCostFunction,
2116 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2117 avextol = selfA._parameters["CostDecrementTolerance"],
2118 disp = selfA._parameters["optdisp"],
2121 elif selfA._parameters["Minimizer"] == "BFGS":
2122 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2125 fprime = GradientOfCostFunction,
2127 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2128 gtol = selfA._parameters["GradientNormTolerance"],
2129 disp = selfA._parameters["optdisp"],
2133 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2135 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2136 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2138 # Correction to work around a TNC bug on the returned Minimum
2139 # ----------------------------------------------------------------
2140 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2141 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2142 Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
2144 Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
2146 # Obtaining the analysis
2147 # ----------------------
2150 selfA.StoredVariables["Analysis"].store( Xa )
2152 if selfA._toStore("OMA") or \
2153 selfA._toStore("SigmaObs2") or \
2154 selfA._toStore("SimulationQuantiles") or \
2155 selfA._toStore("SimulatedObservationAtOptimum"):
2156 if selfA._toStore("SimulatedObservationAtCurrentState"):
2157 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2158 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2159 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2163 # Computation of the analysis covariance
2164 # ---------------------------------
2165 if selfA._toStore("APosterioriCovariance") or \
2166 selfA._toStore("SimulationQuantiles") or \
2167 selfA._toStore("JacobianMatrixAtOptimum") or \
2168 selfA._toStore("KalmanGainAtOptimum"):
2169 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2170 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2171 if selfA._toStore("APosterioriCovariance") or \
2172 selfA._toStore("SimulationQuantiles") or \
2173 selfA._toStore("KalmanGainAtOptimum"):
2174 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2175 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2176 if selfA._toStore("APosterioriCovariance") or \
2177 selfA._toStore("SimulationQuantiles"):
2183 _ee = numpy.matrix(numpy.zeros(nb)).T
2185 _HtEE = numpy.dot(HtM,_ee)
2186 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2187 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2188 HessienneI = numpy.matrix( HessienneI )
2190 if min(A.shape) != max(A.shape):
2191 raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2192 if (numpy.diag(A) < 0).any():
2193 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2194 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2196 L = numpy.linalg.cholesky( A )
2198 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2199 if selfA._toStore("APosterioriCovariance"):
2200 selfA.StoredVariables["APosterioriCovariance"].store( A )
2201 if selfA._toStore("JacobianMatrixAtOptimum"):
2202 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2203 if selfA._toStore("KalmanGainAtOptimum"):
2204 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2205 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2206 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2208 # Additional computations and/or storage
2209 # ---------------------------------------
2210 if selfA._toStore("Innovation") or \
2211 selfA._toStore("SigmaObs2") or \
2212 selfA._toStore("MahalanobisConsistency") or \
2213 selfA._toStore("OMB"):
2215 if selfA._toStore("Innovation"):
2216 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2217 if selfA._toStore("BMA"):
2218 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2219 if selfA._toStore("OMA"):
2220 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2221 if selfA._toStore("OMB"):
2222 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2223 if selfA._toStore("SigmaObs2"):
2224 TraceR = R.trace(Y.size)
2225 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2226 if selfA._toStore("MahalanobisConsistency"):
2227 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2228 if selfA._toStore("SimulationQuantiles"):
2229 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2230 HXa = numpy.matrix(numpy.ravel( HXa )).T
2232 for i in range(nech):
2233 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2234 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2235 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2237 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2238 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2239 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2243 YfQ = numpy.hstack((YfQ,Yr))
2246 for quantile in selfA._parameters["Quantiles"]:
2247 if not (0. <= float(quantile) <= 1.): continue
2248 indice = int(nech * float(quantile) - 1./nech)
2249 if YQ is None: YQ = YfQ[:,indice]
2250 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2251 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2252 if selfA._toStore("SimulatedObservationAtBackground"):
2253 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2254 if selfA._toStore("SimulatedObservationAtOptimum"):
2255 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2259 # ==============================================================================
2260 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
2264 if selfA._parameters["EstimationOf"] == "Parameters":
2265 selfA._parameters["StoreInternalVariables"] = True
2269 H = HO["Direct"].appliedControledFormTo
2271 if selfA._parameters["EstimationOf"] == "State":
2272 M = EM["Direct"].appliedControledFormTo
2274 if CM is not None and "Tangent" in CM and U is not None:
2275 Cm = CM["Tangent"].asMatrix(Xb)
2279 # Number of steps identical to the number of observation steps
2280 # -------------------------------------------------------
2281 if hasattr(Y,"stepnumber"):
2282 duration = Y.stepnumber()
2283 __p = numpy.cumprod(Y.shape())[-1]
2286 __p = numpy.array(Y).size
2288 # Pre-computation of the inversions of B and R
2289 # ----------------------------------
2290 if selfA._parameters["StoreInternalVariables"] \
2291 or selfA._toStore("CostFunctionJ") \
2292 or selfA._toStore("CostFunctionJb") \
2293 or selfA._toStore("CostFunctionJo") \
2294 or selfA._toStore("CurrentOptimum") \
2295 or selfA._toStore("APosterioriCovariance"):
2302 __m = selfA._parameters["NumberOfMembers"]
2303 if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2305 if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
2307 if hasattr(Q,"asfullmatrix"): Qn = Q.asfullmatrix(__n)
2309 Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2311 if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2312 selfA.StoredVariables["Analysis"].store( Xb )
2313 if selfA._toStore("APosterioriCovariance"):
2314 selfA.StoredVariables["APosterioriCovariance"].store( Pn )
2317 previousJMinimum = numpy.finfo(float).max
2319 for step in range(duration-1):
2320 if hasattr(Y,"store"):
2321 Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2323 Ynpu = numpy.ravel( Y ).reshape((__p,1))
2326 if hasattr(U,"store") and len(U)>1:
2327 Un = numpy.asmatrix(numpy.ravel( U[step] )).T
2328 elif hasattr(U,"store") and len(U)==1:
2329 Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2331 Un = numpy.asmatrix(numpy.ravel( U )).T
2335 if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2336 Xn = CovarianceInflation( Xn,
2337 selfA._parameters["InflationType"],
2338 selfA._parameters["InflationFactor"],
2341 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2342 EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2344 returnSerieAsArrayMatrix = True )
2345 qi = numpy.random.multivariate_normal(numpy.zeros(__n), Qn, size=__m).T
2346 Xn_predicted = EMX + qi
2347 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2349 returnSerieAsArrayMatrix = True )
2350 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, there is a duplication!
2351 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2352 Xn_predicted = Xn_predicted + Cm * Un
2353 elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2354 # --- > By principle, M = Id, Q = 0
2356 HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
2358 returnSerieAsArrayMatrix = True )
2360 # Mean of forecast and observation of forecast
2361 Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2362 Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2364 #--------------------------
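# Stochastic EnKF analysis with perturbed observations. In the
# "KalmanFilterFormula05" variant the gain Kn = Pf H^T (H Pf H^T + R)^-1 is
# estimated from the forecast and observed-forecast anomalies, and each member
# is updated against an individually perturbed observation; the
# "KalmanFilterFormula16" variant builds the gain from the centered observation
# perturbations EpY and the scaled anomalies EaX and EaY.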
2365 if VariantM == "KalmanFilterFormula05":
2366 PfHT, HPfHT = 0., 0.
2367 for i in range(__m):
2368 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
2369 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
2370 PfHT += Exfi * Eyfi.T
2371 HPfHT += Eyfi * Eyfi.T
2372 PfHT = (1./(__m-1)) * PfHT
2373 HPfHT = (1./(__m-1)) * HPfHT
2374 Kn = PfHT * ( R + HPfHT ).I
2377 for i in range(__m):
2378 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
2379 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
2380 #--------------------------
2381 elif VariantM == "KalmanFilterFormula16":
2382 EpY = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
2383 EpYm = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2385 EaX = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
2386 EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
2388 Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
2390 for i in range(__m):
2391 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
2392 #--------------------------
2394 raise ValueError("VariantM has to be chosen from the authorized methods list.")
2396 if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2397 Xn = CovarianceInflation( Xn,
2398 selfA._parameters["InflationType"],
2399 selfA._parameters["InflationFactor"],
2402 Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2403 #--------------------------
2405 if selfA._parameters["StoreInternalVariables"] \
2406 or selfA._toStore("CostFunctionJ") \
2407 or selfA._toStore("CostFunctionJb") \
2408 or selfA._toStore("CostFunctionJo") \
2409 or selfA._toStore("APosterioriCovariance") \
2410 or selfA._toStore("InnovationAtCurrentAnalysis") \
2411 or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2412 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2413 _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
2414 _Innovation = Ynpu - _HXa
2416 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2417 # ---> with analysis
2418 selfA.StoredVariables["Analysis"].store( Xa )
2419 if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2420 selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2421 if selfA._toStore("InnovationAtCurrentAnalysis"):
2422 selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2423 # ---> with current state
2424 if selfA._parameters["StoreInternalVariables"] \
2425 or selfA._toStore("CurrentState"):
2426 selfA.StoredVariables["CurrentState"].store( Xn )
2427 if selfA._toStore("ForecastState"):
2428 selfA.StoredVariables["ForecastState"].store( EMX )
2429 if selfA._toStore("BMA"):
2430 selfA.StoredVariables["BMA"].store( EMX - Xa )
2431 if selfA._toStore("InnovationAtCurrentState"):
2432 selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
2433 if selfA._toStore("SimulatedObservationAtCurrentState") \
2434 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2435 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
2437 if selfA._parameters["StoreInternalVariables"] \
2438 or selfA._toStore("CostFunctionJ") \
2439 or selfA._toStore("CostFunctionJb") \
2440 or selfA._toStore("CostFunctionJo") \
2441 or selfA._toStore("CurrentOptimum") \
2442 or selfA._toStore("APosterioriCovariance"):
2443 Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
2444 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2446 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2447 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2448 selfA.StoredVariables["CostFunctionJ" ].store( J )
2450 if selfA._toStore("IndexOfOptimum") \
2451 or selfA._toStore("CurrentOptimum") \
2452 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2453 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2454 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2455 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2456 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2457 if selfA._toStore("IndexOfOptimum"):
2458 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2459 if selfA._toStore("CurrentOptimum"):
2460 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2461 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2462 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2463 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2464 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2465 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2466 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2467 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2468 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2469 if selfA._toStore("APosterioriCovariance"):
2470 selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2471 if selfA._parameters["EstimationOf"] == "Parameters" \
2472 and J < previousJMinimum:
2473 previousJMinimum = J
2475 if selfA._toStore("APosterioriCovariance"):
2476 covarianceXaMin = Pn
2478 # Final additional storage of the optimum in parameter estimation
2479 # ----------------------------------------------------------------------
2480 if selfA._parameters["EstimationOf"] == "Parameters":
2481 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2482 selfA.StoredVariables["Analysis"].store( XaMin )
2483 if selfA._toStore("APosterioriCovariance"):
2484 selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2485 if selfA._toStore("BMA"):
2486 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2490 # ==============================================================================
2491 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2500 Hm = HO["Direct"].appliedTo
2501 Ha = HO["Adjoint"].appliedInXTo
2503 # Possible use of a precomputed H(Xb) vector
2504 if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2505 HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2508 HXb = numpy.asmatrix(numpy.ravel( HXb )).T
2509 if Y.size != HXb.size:
2510 raise ValueError("The size %i of observations Y and the size %i of the observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
2511 if max(Y.shape) != max(HXb.shape):
2512 raise ValueError("The shape %s of observations Y and the shape %s of the observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
2514 if selfA._toStore("JacobianMatrixAtBackground"):
2515 HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2516 HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2517 selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2519 # Pre-computation of the inversions of B and R
2523 # Starting point of the optimization
2524 Xini = selfA._parameters["InitializationPoint"]
2526 # Definition of the cost function
2527 # ------------------------------
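# Standard (primal) 3DVAR cost function:
#   J(x) = 0.5*(x-Xb)^T B^-1 (x-Xb) + 0.5*(Y-H(x))^T R^-1 (Y-H(x))
# with gradient grad(J) = B^-1 (x-Xb) - H^T R^-1 (Y-H(x)), the second term being
# evaluated below through the adjoint operator Ha.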
2528 def CostFunction(x):
2529 _X = numpy.asmatrix(numpy.ravel( x )).T
2530 if selfA._parameters["StoreInternalVariables"] or \
2531 selfA._toStore("CurrentState") or \
2532 selfA._toStore("CurrentOptimum"):
2533 selfA.StoredVariables["CurrentState"].store( _X )
2535 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2536 _Innovation = Y - _HX
2537 if selfA._toStore("SimulatedObservationAtCurrentState") or \
2538 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2539 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
2540 if selfA._toStore("InnovationAtCurrentState"):
2541 selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
2543 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2544 Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
2547 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2548 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2549 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2550 selfA.StoredVariables["CostFunctionJ" ].store( J )
2551 if selfA._toStore("IndexOfOptimum") or \
2552 selfA._toStore("CurrentOptimum") or \
2553 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2554 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2555 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2556 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2557 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2558 if selfA._toStore("IndexOfOptimum"):
2559 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2560 if selfA._toStore("CurrentOptimum"):
2561 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2562 if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2563 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2564 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2565 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2566 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2567 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2568 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2569 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2572 def GradientOfCostFunction(x):
2573 _X = numpy.asmatrix(numpy.ravel( x )).T
2575 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2576 GradJb = BI * (_X - Xb)
2577 GradJo = - Ha( (_X, RI * (Y - _HX)) )
2578 GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2581 # Minimization of the functional
2582 # --------------------------------
2583 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2585 if selfA._parameters["Minimizer"] == "LBFGSB":
2586 if "0.19" <= scipy.version.version <= "1.1.0":
2587 import lbfgsbhlt as optimiseur
2589 import scipy.optimize as optimiseur
2590 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2591 func = CostFunction,
2593 fprime = GradientOfCostFunction,
2595 bounds = selfA._parameters["Bounds"],
2596 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2597 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2598 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2599 iprint = selfA._parameters["optiprint"],
2601 nfeval = Informations['funcalls']
2602 rc = Informations['warnflag']
2603 elif selfA._parameters["Minimizer"] == "TNC":
2604 Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2605 func = CostFunction,
2607 fprime = GradientOfCostFunction,
2609 bounds = selfA._parameters["Bounds"],
2610 maxfun = selfA._parameters["MaximumNumberOfSteps"],
2611 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2612 ftol = selfA._parameters["CostDecrementTolerance"],
2613 messages = selfA._parameters["optmessages"],
2615 elif selfA._parameters["Minimizer"] == "CG":
2616 Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2619 fprime = GradientOfCostFunction,
2621 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2622 gtol = selfA._parameters["GradientNormTolerance"],
2623 disp = selfA._parameters["optdisp"],
2626 elif selfA._parameters["Minimizer"] == "NCG":
2627 Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2630 fprime = GradientOfCostFunction,
2632 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2633 avextol = selfA._parameters["CostDecrementTolerance"],
2634 disp = selfA._parameters["optdisp"],
2637 elif selfA._parameters["Minimizer"] == "BFGS":
2638 Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2641 fprime = GradientOfCostFunction,
2643 maxiter = selfA._parameters["MaximumNumberOfSteps"],
2644 gtol = selfA._parameters["GradientNormTolerance"],
2645 disp = selfA._parameters["optdisp"],
2649 raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2651 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2652 MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2654 # Correction to work around a TNC bug on the returned Minimum
2655 # ----------------------------------------------------------------
2656 if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2657 Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2659 # Obtaining the analysis
2660 # ----------------------
2661 Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
2663 selfA.StoredVariables["Analysis"].store( Xa )
2665 if selfA._toStore("OMA") or \
2666 selfA._toStore("SigmaObs2") or \
2667 selfA._toStore("SimulationQuantiles") or \
2668 selfA._toStore("SimulatedObservationAtOptimum"):
2669 if selfA._toStore("SimulatedObservationAtCurrentState"):
2670 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2671 elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2672 HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2676 # Computation of the analysis covariance
2677 # ---------------------------------
2678 if selfA._toStore("APosterioriCovariance") or \
2679 selfA._toStore("SimulationQuantiles") or \
2680 selfA._toStore("JacobianMatrixAtOptimum") or \
2681 selfA._toStore("KalmanGainAtOptimum"):
2682 HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2683 HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2684 if selfA._toStore("APosterioriCovariance") or \
2685 selfA._toStore("SimulationQuantiles") or \
2686 selfA._toStore("KalmanGainAtOptimum"):
2687 HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2688 HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2689 if selfA._toStore("APosterioriCovariance") or \
2690 selfA._toStore("SimulationQuantiles"):
2694 _ee = numpy.matrix(numpy.zeros(nb)).T
2696 _HtEE = numpy.dot(HtM,_ee)
2697 _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
2698 HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
2699 HessienneI = numpy.matrix( HessienneI )
2701 if min(A.shape) != max(A.shape):
2702 raise ValueError("The %s a posteriori covariance matrix A has shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
2703 if (numpy.diag(A) < 0).any():
2704 raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
2705 if logging.getLogger().level < logging.WARNING: # The check is only performed in debug mode
2707 L = numpy.linalg.cholesky( A )
2709 raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
2710 if selfA._toStore("APosterioriCovariance"):
2711 selfA.StoredVariables["APosterioriCovariance"].store( A )
2712 if selfA._toStore("JacobianMatrixAtOptimum"):
2713 selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2714 if selfA._toStore("KalmanGainAtOptimum"):
2715 if (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2716 elif (Y.size > Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2717 selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2719 # Additional computations and/or storage
2720 # ---------------------------------------
2721 if selfA._toStore("Innovation") or \
2722 selfA._toStore("SigmaObs2") or \
2723 selfA._toStore("MahalanobisConsistency") or \
2724 selfA._toStore("OMB"):
2726 if selfA._toStore("Innovation"):
2727 selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
2728 if selfA._toStore("BMA"):
2729 selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2730 if selfA._toStore("OMA"):
2731 selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2732 if selfA._toStore("OMB"):
2733 selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
2734 if selfA._toStore("SigmaObs2"):
2735 TraceR = R.trace(Y.size)
2736 selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
2737 if selfA._toStore("MahalanobisConsistency"):
2738 selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2739 if selfA._toStore("SimulationQuantiles"):
2740 nech = selfA._parameters["NumberOfSamplesForQuantiles"]
2741 HXa = numpy.matrix(numpy.ravel( HXa )).T
2743 for i in range(nech):
2744 if selfA._parameters["SimulationForQuantiles"] == "Linear":
2745 dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
2746 dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
2748 elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
2749 Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
2750 Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
2754 YfQ = numpy.hstack((YfQ,Yr))
2757 for quantile in selfA._parameters["Quantiles"]:
2758 if not (0. <= float(quantile) <= 1.): continue
2759 indice = int(nech * float(quantile) - 1./nech)
2760 if YQ is None: YQ = YfQ[:,indice]
2761 else: YQ = numpy.hstack((YQ,YfQ[:,indice]))
2762 selfA.StoredVariables["SimulationQuantiles"].store( YQ )
2763 if selfA._toStore("SimulatedObservationAtBackground"):
2764 selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
2765 if selfA._toStore("SimulatedObservationAtOptimum"):
2766 selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
2770 # ==============================================================================
2771 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2780 Hm = HO["Direct"].appliedControledFormTo
2781 Mm = EM["Direct"].appliedControledFormTo
2783 if CM is not None and "Tangent" in CM and U is not None:
2784 Cm = CM["Tangent"].asMatrix(Xb)
2790 if hasattr(U,"store") and 1<=_step<len(U) :
2791 _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
2792 elif hasattr(U,"store") and len(U)==1:
2793 _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
2795 _Un = numpy.asmatrix(numpy.ravel( U )).T
2800 if Cm is not None and _un is not None: # Warning: if Cm is also included in M, there is a duplication!
2801 _Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
2807 # Remark: the observations are used starting from time step number 1,
2808 # and are stored in Yo indexed according to these indices.
2809 # Step 0 is therefore not used, since the first stage starts
2810 # with the observation of step 1.
2812 # Number of steps identical to the number of observation steps
2813 if hasattr(Y,"stepnumber"):
2814 duration = Y.stepnumber()
2818 # Pre-computation of the inversions of B and R
2822 # Starting point of the optimization
2823 Xini = selfA._parameters["InitializationPoint"]
2825 # Definition of the cost function
2826 # ------------------------------
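# Strong-constraint 4DVAR: the control variable is the state at the start of the
# window. The cost function propagates it with Mm over all steps, accumulating
# Jo = sum_k 0.5*(Y_k - H(X_k))^T R^-1 (Y_k - H(X_k)) on top of the background
# term Jb; the gradient is then obtained by sweeping the stored innovations
# backwards with the adjoint operators Ha and Ma.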
2827 selfA.DirectCalculation = [None,] # Step 0 is not observed
2828 selfA.DirectInnovation = [None,] # Step 0 is not observed
2829 def CostFunction(x):
2830 _X = numpy.asmatrix(numpy.ravel( x )).T
2831 if selfA._parameters["StoreInternalVariables"] or \
2832 selfA._toStore("CurrentState") or \
2833 selfA._toStore("CurrentOptimum"):
2834 selfA.StoredVariables["CurrentState"].store( _X )
2835 Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
2836 selfA.DirectCalculation = [None,]
2837 selfA.DirectInnovation = [None,]
2840 for step in range(0,duration-1):
2841 if hasattr(Y,"store"):
2842 _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
2844 _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
2848 if selfA._parameters["EstimationOf"] == "State":
2849 _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
2850 elif selfA._parameters["EstimationOf"] == "Parameters":
2853 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
2854 _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
2855 _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
2857 # Difference-to-observations step
2858 if selfA._parameters["EstimationOf"] == "State":
2859 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
2860 elif selfA._parameters["EstimationOf"] == "Parameters":
2861 _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
2863 # Storage of the state
2864 selfA.DirectCalculation.append( _Xn )
2865 selfA.DirectInnovation.append( _YmHMX )
2867 # Addition to the observation functional
2868 Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
2871 selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2872 selfA.StoredVariables["CostFunctionJb"].store( Jb )
2873 selfA.StoredVariables["CostFunctionJo"].store( Jo )
2874 selfA.StoredVariables["CostFunctionJ" ].store( J )
2875 if selfA._toStore("IndexOfOptimum") or \
2876 selfA._toStore("CurrentOptimum") or \
2877 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2878 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2879 selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2880 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2881 if selfA._toStore("IndexOfOptimum"):
2882 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2883 if selfA._toStore("CurrentOptimum"):
2884 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2885 if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2886 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2887 if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2888 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2889 if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2890 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2893 def GradientOfCostFunction(x):
2894 _X = numpy.asmatrix(numpy.ravel( x )).T
2895 GradJb = BI * (_X - Xb)
2897 for step in range(duration-1,0,-1):
2898 # Retrieve the last stored evolution state
2899 _Xn = selfA.DirectCalculation.pop()
2900 # Retrieve the last stored innovation
2901 _YmHMX = selfA.DirectInnovation.pop()
2902 # Computation of the adjoint operators
2903 Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2904 Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
2905 Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
2906 Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
2907 # Gradient computation by adjoint state
2908 GradJo = GradJo + Ha * RI * _YmHMX # Equivalent, for a linear Ha, to: Ha( (_Xn, RI * _YmHMX) )
2909 GradJo = Ma * GradJo # Equivalent, for a linear Ma, to: Ma( (_Xn, GradJo) )
2910 GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
2913 # Minimization of the functional
2914 # --------------------------------
2915 nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2917 if selfA._parameters["Minimizer"] == "LBFGSB":
2918 if "0.19" <= scipy.version.version <= "1.1.0":
2919 import lbfgsbhlt as optimiseur
2921 import scipy.optimize as optimiseur
2922 Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2923 func = CostFunction,
2925 fprime = GradientOfCostFunction,
2927 bounds = selfA._parameters["Bounds"],
2928 maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
2929 factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
2930 pgtol = selfA._parameters["ProjectedGradientTolerance"],
2931 iprint = selfA._parameters["optiprint"],
2933 nfeval = Informations['funcalls']
2934 rc = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
# ==============================================================================
def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
    """
    3DVAR variational analysis with no inversion of B
    """
    # Operators
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    # Precompute the required inversions and transposes of B and R
    BI = B.getI()
    BT = B.getT()
    RI = R.getI()
    #
    # Starting point of the optimization
    Xini = numpy.zeros(Xb.shape)
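    #
    # Note (illustrative): this variant works on a control variable v through
    # the change of variables X = Xb + B * v, so that the background term is
    # Jb(v) = 0.5 * v.T * B.T * v and no inversion of B is needed inside the
    # cost function; the analysis is recovered at the end as Xa = Xb + B * v.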
    # Definition of the cost function
    # -------------------------------
    def CostFunction(v):
        _V = numpy.asmatrix(numpy.ravel( v )).T
        _X = Xb + B * _V
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = float( 0.5 * _V.T * BT * _V )
        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    def GradientOfCostFunction(v):
        _V  = numpy.asmatrix(numpy.ravel( v )).T
        _X  = Xb + B * _V
        _HX = Hm( _X )
        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
        GradJb = BT * _V
        GradJo = - Ha( (_X, RI * (Y - _HX)) )
        GradJ  = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    # Minimization of the cost functional
    # -----------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        if "0.19" <= scipy.version.version <= "1.1.0":
            import lbfgsbhlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        nfeval = Informations['funcalls']
        rc     = Informations['warnflag']
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    else:
        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
    #
    # Correction to work around a TNC bug on the returned Minimum
    # -----------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
    else:
        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
    #
    # Retrieval of the analysis
    # -------------------------
    Xa = Minimum
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    if selfA._toStore("OMA") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
    # Computation of the analysis error covariance
    # --------------------------------------------
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("JacobianMatrixAtOptimum") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles") or \
        selfA._toStore("KalmanGainAtOptimum"):
        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
    if selfA._toStore("APosterioriCovariance") or \
        selfA._toStore("SimulationQuantiles"):
        HessienneI = []
        nb = Xa.size
        for i in range(nb):
            _ee    = numpy.matrix(numpy.zeros(nb)).T
            _ee[i] = 1.
            _HtEE  = numpy.dot(HtM,_ee)
            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
        HessienneI = numpy.matrix( HessienneI )
        A = HessienneI.I
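        #
        # Note (illustrative): each vector appended to HessienneI above is the
        # Gauss-Newton Hessian of the 3DVAR functional, B^-1 + H^T R^-1 H,
        # applied to a canonical basis vector _ee, so that A = HessienneI.I
        # approximates the a posteriori analysis error covariance
        # (B^-1 + H^T R^-1 H)^-1.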
        if min(A.shape) != max(A.shape):
            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
        if (numpy.diag(A) < 0).any():
            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
        if logging.getLogger().level < logging.WARNING: # This check is only done in debug mode
            try:
                L = numpy.linalg.cholesky( A )
            except:
                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
    if selfA._toStore("APosterioriCovariance"):
        selfA.StoredVariables["APosterioriCovariance"].store( A )
    if selfA._toStore("JacobianMatrixAtOptimum"):
        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
    if selfA._toStore("KalmanGainAtOptimum"):
        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
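    #
    # Note (illustrative): the two expressions of the gain KG are the classical
    # equivalent forms K = B H^T (R + H B H^T)^-1 and
    # K = (B^-1 + H^T R^-1 H)^-1 H^T R^-1; the first one is cheaper when there
    # are fewer observations than state variables, the second one otherwise.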
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("SigmaObs2") or \
        selfA._toStore("MahalanobisConsistency") or \
        selfA._toStore("OMB"):
        d = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
    if selfA._toStore("SigmaObs2"):
        TraceR = R.trace(Y.size)
        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
    if selfA._toStore("MahalanobisConsistency"):
        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
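    #
    # Note (illustrative): SigmaObs2 is an innovation-based estimate of the
    # observation error variance scaling, d^T (y - H(xa)) / Tr(R), and
    # MahalanobisConsistency is expected to be close to 1 when B and R are
    # statistically consistent with the observed innovation (2 Jmin ~ size of d).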
    if selfA._toStore("SimulationQuantiles"):
        nech = selfA._parameters["NumberOfSamplesForQuantiles"]
        HXa  = numpy.matrix(numpy.ravel( HXa )).T
        YfQ  = None
        for i in range(nech):
            if selfA._parameters["SimulationForQuantiles"] == "Linear":
                dXr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A) - Xa.A1).T
                dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
                Yr  = HXa + dYr
            elif selfA._parameters["SimulationForQuantiles"] == "NonLinear":
                Xr = numpy.matrix(numpy.random.multivariate_normal(Xa.A1,A)).T
                Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
            if YfQ is None:
                YfQ = Yr
            else:
                YfQ = numpy.hstack((YfQ,Yr))
        YfQ.sort(axis=-1)
        YQ = None
        for quantile in selfA._parameters["Quantiles"]:
            if not (0. <= float(quantile) <= 1.): continue
            indice = int(nech * float(quantile) - 1./nech)
            if YQ is None: YQ = YfQ[:,indice]
            else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
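    #
    # Note (illustrative): the quantiles are obtained by Monte Carlo sampling of
    # states around Xa with covariance A, propagated through the tangent or the
    # full observation operator, then by reading the sorted simulated
    # observations at the index associated with each requested quantile.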
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')
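    #
    # Minimal standalone sketch (illustrative only, not part of the module API):
    # it minimizes a preconditioned 3DVAR functional J(v), with X = Xb + B v, on
    # a purely hypothetical toy problem, using the same L-BFGS-B routine as the
    # algorithms above. All data below are invented for demonstration.
    Bt  = numpy.array([[2.0, 0.5], [0.5, 1.0]])        # background error covariance
    Rt  = numpy.diag([0.1, 0.1, 0.1])                  # observation error covariance
    Ht  = numpy.array([[1., 0.], [0., 1.], [1., 1.]])  # linear observation operator
    Xbt = numpy.array([0., 0.])                        # background state
    Yt  = numpy.array([1., 2., 3.2])                   # observations
    RIt = numpy.linalg.inv(Rt)
    def Jtest(v):
        # Preconditioned cost: 0.5 v^T B v + 0.5 (Y - H(Xb + B v))^T R^-1 (...)
        dI = Yt - Ht @ (Xbt + Bt @ v)
        return 0.5 * v @ Bt @ v + 0.5 * dI @ RIt @ dI
    def GradJtest(v):
        # Exact gradient of Jtest with respect to the control variable v
        dI = Yt - Ht @ (Xbt + Bt @ v)
        return Bt @ v - Bt @ (Ht.T @ (RIt @ dI))
    vopt, Jopt, infos = scipy.optimize.fmin_l_bfgs_b(Jtest, numpy.zeros(2), fprime = GradJtest)
    print("Illustrative toy analysis Xa =", Xbt + Bt @ vopt, " with J(v*) =", float(Jopt))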