1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.ravel( X ).reshape((-1,1))
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue des DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
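    # Illustrative usage sketch, as comments only (the operator H and the
    # variables FDA, HX, J, JtY are hypothetical, introduced to clarify the
    # interface described above):
    #
    #   def H(x):
    #       return numpy.asarray(x)**2
    #   FDA = FDApproximation( Function = H, increment = 0.01, centeredDF = True )
    #   HX  = FDA.DirectOperator( numpy.array([1., 2.]) )         # ~ [1., 4.]
    #   J   = FDA.TangentMatrix( numpy.array([1., 2.]) )          # ~ diag(2., 4.)
    #   JtY = FDA.AdjointOperator( (numpy.array([1., 2.]), HX) )  # ~ J.T @ HX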
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             reducingMemoryUse     = False,
70             avoidingRedundancy    = True,
71             toleranceInRedundancy = 1.e-18,
72             lenghtOfRedundancy    = -1,
73             mpEnabled             = False,
74             mpWorkers             = None,
75             mfEnabled             = False,
76             ):
77         self.__name = str(name)
78         self.__extraArgs = extraArguments
79         #
80         if mpEnabled:
81             try:
82                 import multiprocessing
83                 self.__mpEnabled = True
84             except ImportError:
85                 self.__mpEnabled = False
86         else:
87             self.__mpEnabled = False
88         self.__mpWorkers = mpWorkers
89         if self.__mpWorkers is not None and self.__mpWorkers < 1:
90             self.__mpWorkers = None
91         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92         #
93         self.__mfEnabled = bool(mfEnabled)
94         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
95         #
96         self.__rmEnabled = bool(reducingMemoryUse)
97         logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
98         #
99         if avoidingRedundancy:
100             self.__avoidRC = True
101             self.__tolerBP = float(toleranceInRedundancy)
102             self.__lenghtRJ = int(lenghtOfRedundancy)
103             self.__listJPCP = [] # Jacobian Previous Calculated Points
104             self.__listJPCI = [] # Jacobian Previous Calculated Increment
105             self.__listJPCR = [] # Jacobian Previous Calculated Results
106             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
107             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
108         else:
109             self.__avoidRC = False
110         logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
111         if self.__avoidRC:
112             logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
113         #
114         if self.__mpEnabled:
115             if isinstance(Function,types.FunctionType):
116                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
117                 self.__userFunction__name = Function.__name__
118                 try:
119                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
120                 except:
121                     mod = os.path.abspath(Function.__globals__['__file__'])
122                 if not os.path.isfile(mod):
123                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
124                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
125                 self.__userFunction__path = os.path.dirname(mod)
126                 del mod
127                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
128                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
129             elif isinstance(Function,types.MethodType):
130                 logging.debug("FDA Calculs en multiprocessing : MethodType")
131                 self.__userFunction__name = Function.__name__
132                 try:
133                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
134                 except:
135                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
136                 if not os.path.isfile(mod):
137                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
138                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
139                 self.__userFunction__path = os.path.dirname(mod)
140                 del mod
141                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
143             else:
144                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
145         else:
146             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
147             self.__userFunction = self.__userOperator.appliedTo
148         #
149         self.__centeredDF = bool(centeredDF)
150         if abs(float(increment)) > 1.e-15:
151             self.__increment  = float(increment)
152         else:
153             self.__increment  = 0.01
154         if dX is None:
155             self.__dX     = None
156         else:
157             self.__dX     = numpy.ravel( dX )
158
159     # ---------------------------------------------------------
160     def __doublon__(self, e, l, n, v=None):
161         __ac, __iac = False, -1
162         for i in range(len(l)-1,-1,-1):
163             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
164                 __ac, __iac = True, i
165                 if v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(v,__iac))
166                 break
167         return __ac, __iac
168
169     # ---------------------------------------------------------
170     def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
171         "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
172         if not isinstance(__LMatrix, (list,tuple)):
173             raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
174         if __dotWith is not None:
175             __Idwx = numpy.ravel( __dotWith )
176             assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
177             __Produit = numpy.zeros(__LMatrix[0].size)
178             for i, col in enumerate(__LMatrix):
179                 __Produit += float(__Idwx[i]) * col
180             return __Produit
181         elif __dotTWith is not None:
182             _Idwy = numpy.ravel( __dotTWith ).T
183             assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
184             __Produit = numpy.zeros(len(__LMatrix))
185             for i, col in enumerate(__LMatrix):
186                 __Produit[i] = float( _Idwy @ col)
187             return __Produit
188         else:
189             __Produit = None
190         return __Produit
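    # Sketch: for __LMatrix = [c0, c1], the columns of a Jacobian J, the helper
    # above returns either
    #   v[0]*c0 + v[1]*c1  (i.e. J @ v)   when __dotWith is the vector v, or
    #   [w @ c0, w @ c1]   (i.e. J.T @ w) when __dotTWith is the vector w.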
191
192     # ---------------------------------------------------------
193     def DirectOperator(self, X, **extraArgs ):
194         """
195         Calcul du direct à l'aide de la fonction fournie.
196
197         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
198         ne doivent pas être données ici à la fonction utilisateur.
199         """
200         logging.debug("FDA Calcul DirectOperator (explicite)")
201         if self.__mfEnabled:
202             _HX = self.__userFunction( X, argsAsSerie = True )
203         else:
204             _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
205         #
206         return _HX
207
208     # ---------------------------------------------------------
209     def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
210         """
211         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
212         c'est-à-dire le gradient de H en X. On utilise des différences finies
213         directionnelles autour du point X. X est un numpy.ndarray.
214
215         Différences finies centrées (approximation d'ordre 2):
216         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
217            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
218            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
219            H( X_moins_dXi )
220         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
221            le pas 2*dXi
222         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
223
224         Différences finies non centrées (approximation d'ordre 1):
225         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
226            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
227            HX_plus_dXi = H( X_plus_dXi )
228         2/ On calcule la valeur centrale HX = H(X)
229         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
230            le pas dXi
231         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
232
233         """
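        # Illustrative sketch (not used by the code below): denoting e_i the
        # i-th canonical basis vector, the centered scheme builds column i of
        # the Jacobian as
        #   J[:,i] = ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        # and the non-centered scheme as
        #   J[:,i] = ( H(X + dXi*e_i) - H(X) ) / dXi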
234         logging.debug("FDA Début du calcul de la Jacobienne")
235         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
236         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
237         #
238         if X is None or len(X)==0:
239             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
240         #
241         _X = numpy.ravel( X )
242         #
243         if self.__dX is None:
244             _dX  = self.__increment * _X
245         else:
246             _dX = numpy.ravel( self.__dX )
247         assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
248         assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
249         #
250         if (_dX == 0.).any():
251             moyenne = _dX.mean()
252             if moyenne == 0.:
253                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
254             else:
255                 _dX = numpy.where( _dX == 0., moyenne, _dX )
256         #
257         __alreadyCalculated  = False
258         if self.__avoidRC:
259             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
260             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
261             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
262                 __alreadyCalculated, __i = True, __alreadyCalculatedP
263                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
264         #
265         if __alreadyCalculated:
266             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
267             _Jacobienne = self.__listJPCR[__i]
268             logging.debug("FDA Fin du calcul de la Jacobienne")
269             if dotWith is not None:
270                 return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
271             elif dotTWith is not None:
272                 return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
273         else:
274             logging.debug("FDA   Calcul Jacobienne (explicite)")
275             if self.__centeredDF:
276                 #
277                 if self.__mpEnabled and not self.__mfEnabled:
278                     funcrepr = {
279                         "__userFunction__path" : self.__userFunction__path,
280                         "__userFunction__modl" : self.__userFunction__modl,
281                         "__userFunction__name" : self.__userFunction__name,
282                     }
283                     _jobs = []
284                     for i in range( len(_dX) ):
285                         _dXi            = _dX[i]
286                         _X_plus_dXi     = numpy.array( _X, dtype=float )
287                         _X_plus_dXi[i]  = _X[i] + _dXi
288                         _X_moins_dXi    = numpy.array( _X, dtype=float )
289                         _X_moins_dXi[i] = _X[i] - _dXi
290                         #
291                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
292                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
293                     #
294                     import multiprocessing
295                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
296                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
297                     self.__pool.close()
298                     self.__pool.join()
299                     #
300                     _Jacobienne  = []
301                     for i in range( len(_dX) ):
302                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
303                     #
304                 elif self.__mfEnabled:
305                     _xserie = []
306                     for i in range( len(_dX) ):
307                         _dXi            = _dX[i]
308                         _X_plus_dXi     = numpy.array( _X, dtype=float )
309                         _X_plus_dXi[i]  = _X[i] + _dXi
310                         _X_moins_dXi    = numpy.array( _X, dtype=float )
311                         _X_moins_dXi[i] = _X[i] - _dXi
312                         #
313                         _xserie.append( _X_plus_dXi )
314                         _xserie.append( _X_moins_dXi )
315                     #
316                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
317                     #
318                     _Jacobienne  = []
319                     for i in range( len(_dX) ):
320                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
321                     #
322                 else:
323                     _Jacobienne  = []
324                     for i in range( _dX.size ):
325                         _dXi            = _dX[i]
326                         _X_plus_dXi     = numpy.array( _X, dtype=float )
327                         _X_plus_dXi[i]  = _X[i] + _dXi
328                         _X_moins_dXi    = numpy.array( _X, dtype=float )
329                         _X_moins_dXi[i] = _X[i] - _dXi
330                         #
331                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
332                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
333                         #
334                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
335                 #
336             else:
337                 #
338                 if self.__mpEnabled and not self.__mfEnabled:
339                     funcrepr = {
340                         "__userFunction__path" : self.__userFunction__path,
341                         "__userFunction__modl" : self.__userFunction__modl,
342                         "__userFunction__name" : self.__userFunction__name,
343                     }
344                     _jobs = []
345                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
346                     for i in range( len(_dX) ):
347                         _X_plus_dXi    = numpy.array( _X, dtype=float )
348                         _X_plus_dXi[i] = _X[i] + _dX[i]
349                         #
350                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
351                     #
352                     import multiprocessing
353                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
354                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
355                     self.__pool.close()
356                     self.__pool.join()
357                     #
358                     _HX = _HX_plus_dX.pop(0)
359                     #
360                     _Jacobienne = []
361                     for i in range( len(_dX) ):
362                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
363                     #
364                 elif self.__mfEnabled:
365                     _xserie = []
366                     _xserie.append( _X )
367                     for i in range( len(_dX) ):
368                         _X_plus_dXi    = numpy.array( _X, dtype=float )
369                         _X_plus_dXi[i] = _X[i] + _dX[i]
370                         #
371                         _xserie.append( _X_plus_dXi )
372                     #
373                     _HX_plus_dX = self.DirectOperator( _xserie )
374                     #
375                     _HX = _HX_plus_dX.pop(0)
376                     #
377                     _Jacobienne = []
378                     for i in range( len(_dX) ):
379                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
380                     #
381                 else:
382                     _Jacobienne  = []
383                     _HX = self.DirectOperator( _X )
384                     for i in range( _dX.size ):
385                         _dXi            = _dX[i]
386                         _X_plus_dXi     = numpy.array( _X, dtype=float )
387                         _X_plus_dXi[i]  = _X[i] + _dXi
388                         #
389                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
390                         #
391                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
392             #
393             if (dotWith is not None) or (dotTWith is not None):
394                 __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
395             else:
396                 __Produit = None
397             if __Produit is None or self.__avoidRC:
398                 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
399                 if self.__avoidRC:
400                     if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
401                     while len(self.__listJPCP) > self.__lenghtRJ:
402                         self.__listJPCP.pop(0)
403                         self.__listJPCI.pop(0)
404                         self.__listJPCR.pop(0)
405                         self.__listJPPN.pop(0)
406                         self.__listJPIN.pop(0)
407                     self.__listJPCP.append( copy.copy(_X) )
408                     self.__listJPCI.append( copy.copy(_dX) )
409                     self.__listJPCR.append( copy.copy(_Jacobienne) )
410                     self.__listJPPN.append( numpy.linalg.norm(_X) )
411                     self.__listJPIN.append( numpy.linalg.norm(_dX) )
412             logging.debug("FDA Fin du calcul de la Jacobienne")
413             if __Produit is not None:
414                 return __Produit
415         #
416         return _Jacobienne
417
418     # ---------------------------------------------------------
419     def TangentOperator(self, paire, **extraArgs ):
420         """
421         Calcul du tangent à l'aide de la Jacobienne.
422
423         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
424         ne doivent pas être données ici à la fonction utilisateur.
425         """
426         if self.__mfEnabled:
427             assert len(paire) == 1, "Incorrect length of arguments"
428             _paire = paire[0]
429             assert len(_paire) == 2, "Incorrect number of arguments"
430         else:
431             assert len(paire) == 2, "Incorrect number of arguments"
432             _paire = paire
433         X, dX = _paire
434         if dX is None or len(dX) == 0:
435             #
436             # Calcul de la forme matricielle si le second argument est None
437             # -------------------------------------------------------------
438             _Jacobienne = self.TangentMatrix( X )
439             if self.__mfEnabled: return [_Jacobienne,]
440             else:                return _Jacobienne
441         else:
442             #
443             # Calcul de la valeur linéarisée de H en X appliqué à dX
444             # ------------------------------------------------------
445             _HtX = self.TangentMatrix( X, dotWith = dX )
446             if self.__mfEnabled: return [_HtX,]
447             else:                return _HtX
448
449     # ---------------------------------------------------------
450     def AdjointOperator(self, paire, **extraArgs ):
451         """
452         Calcul de l'adjoint à l'aide de la Jacobienne.
453
454         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
455         ne doivent pas être données ici à la fonction utilisateur.
456         """
457         if self.__mfEnabled:
458             assert len(paire) == 1, "Incorrect length of arguments"
459             _paire = paire[0]
460             assert len(_paire) == 2, "Incorrect number of arguments"
461         else:
462             assert len(paire) == 2, "Incorrect number of arguments"
463             _paire = paire
464         X, Y = _paire
465         if Y is None or len(Y) == 0:
466             #
467             # Calcul de la forme matricielle si le second argument est None
468             # -------------------------------------------------------------
469             _JacobienneT = self.TangentMatrix( X ).T
470             if self.__mfEnabled: return [_JacobienneT,]
471             else:                return _JacobienneT
472         else:
473             #
474             # Calcul de la valeur de l'adjoint en X appliqué à Y
475             # --------------------------------------------------
476             _HaY = self.TangentMatrix( X, dotTWith = Y )
477             if self.__mfEnabled: return [_HaY,]
478             else:                return _HaY
479
480 # ==============================================================================
481 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
482     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
483     #
484     _bgcenter = numpy.ravel(_bgcenter)[:,None]
485     if _nbmembers < 1:
486         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
487     #
488     if _bgcovariance is None:
489         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
490     else:
491         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
492         _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
493     #
494     return _Perturbations
495
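# Illustrative usage sketch, as comments only (Xb, B, E, E0 are hypothetical):
#
#   Xb = numpy.zeros(2)
#   B  = numpy.identity(2)
#   E  = EnsembleOfCenteredPerturbations( Xb, B, 10 )     # shape (2, 10)
#   E0 = EnsembleOfCenteredPerturbations( Xb, None, 10 )  # 10 copies of Xb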
496 # ==============================================================================
497 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
498     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
499     def __CenteredRandomAnomalies(Zr, N):
500         """
501         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
502         notes manuscrites de MB et conforme au code de PS avec eps = -1
503         """
504         eps = -1
505         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
506         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
507         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
508         Q = numpy.dot(Q,R)
509         Zr = numpy.dot(Q,Zr)
510         return Zr.T
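    # Note (sketch): Q has shape (N, N-1) and zero column sums, hence the N
    # returned anomalies sum to zero over the members and the final ensemble
    # keeps _bgcenter as its exact empirical mean.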
511     #
512     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
513     if _nbmembers < 1:
514         raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
515     if _bgcovariance is None:
516         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
517     else:
518         if _withSVD:
519             _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
520             _nbctl = _bgcenter.size
521             if _nbmembers > _nbctl:
522                 _Z = numpy.concatenate((numpy.dot(
523                     numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
524                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
525             else:
526                 _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
527             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
528             _Perturbations = _bgcenter + _Zca
529         else:
530             if max(abs(_bgcovariance.flatten())) > 0:
531                 _nbctl = _bgcenter.size
532                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
533                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
534                 _Perturbations = _bgcenter + _Zca
535             else:
536                 _Perturbations = numpy.tile( _bgcenter, _nbmembers)
537     #
538     return _Perturbations
539
540 # ==============================================================================
541 def EnsembleMean( __Ensemble ):
542     "Renvoie la moyenne empirique d'un ensemble"
543     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
544
545 # ==============================================================================
546 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
547     "Renvoie les anomalies centrées à partir d'un ensemble"
548     if __OptMean is None:
549         __Em = EnsembleMean( __Ensemble )
550     else:
551         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
552     #
553     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
554
555 # ==============================================================================
556 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
557     "Renvoie l'estimation empirique de la covariance d'ensemble"
558     if __quick:
559         # Covariance rapide mais rarement définie positive
560         __Covariance = numpy.cov( __Ensemble )
561     else:
562         # Résultat souvent identique à numpy.cov, mais plus robuste
563         __n, __m = numpy.asarray( __Ensemble ).shape
564         __Anomalies = EnsembleOfAnomalies( __Ensemble )
565         # Estimation empirique
566         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
567         # Assure la symétrie
568         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
569         # Assure la positivité
570         __epsilon    = mpr*numpy.trace( __Covariance )
571         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
572     #
573     return __Covariance
574
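# Illustrative check, as comments only (E, Xm, A, P are hypothetical): the
# robust branch above matches the textbook estimate built from centered
# anomalies.
#
#   E  = numpy.random.randn(3, 5)       # 3 state components, 5 members
#   Xm = EnsembleMean( E )              # shape (3, 1)
#   A  = EnsembleOfAnomalies( E )       # E - Xm
#   P  = EnsembleErrorCovariance( E )   # ~ (A @ A.T) / 4, symmetrized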
575 # ==============================================================================
576 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
577     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
578     if hasattr(__Covariance,"assparsematrix"):
579         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
580             # Traitement d'une covariance nulle ou presque
581             return __Ensemble
582         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
583             # Traitement d'une covariance nulle ou presque
584             return __Ensemble
585     else:
586         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
587             # Traitement d'une covariance nulle ou presque
588             return __Ensemble
589         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
590             # Traitement d'une covariance nulle ou presque
591             return __Ensemble
592     #
593     __n, __m = __Ensemble.shape
594     if __Seed is not None: numpy.random.seed(__Seed)
595     #
596     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
597         # Traitement d'une covariance multiple de l'identité
598         __zero = 0.
599         __std  = numpy.sqrt(__Covariance.assparsematrix())
600         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
601     #
602     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
603         # Traitement d'une covariance diagonale avec variances non identiques
604         __zero = numpy.zeros(__n)
605         __std  = numpy.sqrt(__Covariance.assparsematrix())
606         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
607     #
608     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
609         # Traitement d'une covariance pleine
610         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
611     #
612     elif isinstance(__Covariance, numpy.ndarray):
613         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
614         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
615     #
616     else:
617         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
618     #
619     return __Ensemble
620
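# Illustrative usage sketch, as comments only (E, R, Ep are hypothetical):
# add N(0, R) noise to each member of a zero ensemble.
#
#   E  = numpy.zeros((3, 10))                                  # 3 states, 10 members
#   R  = 0.1 * numpy.identity(3)
#   Ep = EnsemblePerturbationWithGivenCovariance( E, R, 123 )  # E plus noise, in place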
621 # ==============================================================================
622 def CovarianceInflation(
623         InputCovOrEns,
624         InflationType   = None,
625         InflationFactor = None,
626         BackgroundCov   = None,
627         ):
628     """
629     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
630
631     Synthèse : Hunt 2007, section 2.3.5
632     """
633     if InflationFactor is None:
634         return InputCovOrEns
635     else:
636         InflationFactor = float(InflationFactor)
637     #
638     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
639         if InflationFactor < 1.:
640             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
641         if InflationFactor < 1.+mpr:
642             return InputCovOrEns
643         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
644     #
645     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
646         if InflationFactor < 1.:
647             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
648         if InflationFactor < 1.+mpr:
649             return InputCovOrEns
650         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
651         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
652             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
653     #
654     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
655         if InflationFactor < 0.:
656             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
657         if InflationFactor < mpr:
658             return InputCovOrEns
659         __n, __m = numpy.asarray(InputCovOrEns).shape
660         if __n != __m:
661             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
662         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
663     #
664     elif InflationType == "HybridOnBackgroundCovariance":
665         if InflationFactor < 0.:
666             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
667         if InflationFactor < mpr:
668             return InputCovOrEns
669         __n, __m = numpy.asarray(InputCovOrEns).shape
670         if __n != __m:
671             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
672         if BackgroundCov is None:
673             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
674         if InputCovOrEns.shape != BackgroundCov.shape:
675             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
676         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
677     #
678     elif InflationType == "Relaxation":
679         raise NotImplementedError("InflationType Relaxation")
680     #
681     else:
682         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
683     #
684     return OutputCovOrEns
685
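# Note (sketch): for an ensemble EXa and a factor rho >= 1, the multiplicative
# inflation of anomalies used above keeps the empirical mean and scales the
# spread, i.e.
#   CovarianceInflation( EXa, "MultiplicativeOnAnalysisAnomalies", rho )
#   == mean(EXa) + rho * (EXa - mean(EXa))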
686 # ==============================================================================
687 def HessienneEstimation(nb, HaM, HtM, BI, RI):
688     "Estimation de la Hessienne"
689     #
690     HessienneI = []
691     for i in range(int(nb)):
692         _ee    = numpy.zeros((nb,1))
693         _ee[i] = 1.
694         _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
695         HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
696     #
697     A = numpy.linalg.inv(numpy.array( HessienneI ))
698     #
699     if min(A.shape) != max(A.shape):
700         raise ValueError("The a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(str(A.shape),))
701     if (numpy.diag(A) < 0).any():
702         raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
703     if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug
704         try:
705             numpy.linalg.cholesky( A )
706         except Exception:
707             raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
708     #
709     return A
710
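# Sketch of the construction above: each stored row is the image of a canonical
# vector e_i under the (symmetric) approximate Hessian B^-1 + Ht^T R^-1 Ht,
# the adjoint HaM standing for Ht^T, i.e. roughly
#   HessienneI[i] = BI @ e_i + HaM @ (RI @ (HtM @ e_i))
# and the returned a posteriori covariance is A = inv(HessienneI).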
711 # ==============================================================================
712 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
713     "Estimation des quantiles a posteriori (selfA est modifié)"
714     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
715     #
716     # Traitement des bornes
717     if "StateBoundsForQuantiles" in selfA._parameters:
718         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
719     elif "Bounds" in selfA._parameters:
720         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
721     else:
722         LBounds = None
723     if LBounds is not None:
724         LBounds = ForceNumericBounds( LBounds )
725     _Xa = numpy.ravel(Xa)
726     #
727     # Échantillonnage des états
728     YfQ  = None
729     EXr  = None
730     for i in range(nbsamples):
731         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
732             dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
733             if LBounds is not None: # "EstimateProjection" par défaut
734                 dXr = numpy.max(numpy.hstack((dXr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1)
735                 dXr = numpy.min(numpy.hstack((dXr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
736             dYr = HtM @ dXr
737             Yr = HXa.reshape((-1,1)) + dYr
738             if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
739         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
740             Xr = numpy.random.multivariate_normal(_Xa,A)
741             if LBounds is not None: # "EstimateProjection" par défaut
742                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
743                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
744             Yr = Hm( Xr )
745         else:
746             raise ValueError("Quantile simulations have to be either Linear or NonLinear.")
747         #
748         if YfQ is None:
749             YfQ = Yr.reshape((-1,1))
750             if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
751         else:
752             YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
753             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
754     #
755     # Extraction des quantiles
756     YfQ.sort(axis=-1)
757     YQ = None
758     for quantile in selfA._parameters["Quantiles"]:
759         if not (0. <= float(quantile) <= 1.): continue
760         indice = int(nbsamples * float(quantile) - 1./nbsamples)
761         if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
762         else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
763     if YQ is not None: # Liste non vide de quantiles
764         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
765     if selfA._toStore("SampledStateForQuantiles"):
766         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
767     #
768     return 0
769
770 # ==============================================================================
771 def ForceNumericBounds( __Bounds ):
772     "Force les bornes à être des valeurs numériques, sauf si globalement None"
773     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
774     if __Bounds is None: return None
775     # Convertit toutes les bornes individuelles None à +/- l'infini
776     __Bounds = numpy.asarray( __Bounds, dtype=float )
777     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
778         raise ValueError("Incorrectly shaped bounds data")
779     __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
780     __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
781     return __Bounds
782
783 # ==============================================================================
784 def RecentredBounds( __Bounds, __Center):
785     "Recentre les bornes autour de 0, sauf si globalement None"
786     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
787     if __Bounds is None: return None
788     # Recentre les valeurs numériques de bornes
789     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
790
791 # ==============================================================================
792 def ApplyBounds( __Vector, __Bounds, __newClip = True):
793     "Applique des bornes numériques à un point"
794     # Conserve une valeur par défaut s'il n'y a pas de bornes
795     if __Bounds is None: return __Vector
796     #
797     if not isinstance(__Vector, numpy.ndarray): # Is an array
798         raise ValueError("Incorrect array definition of vector data")
799     if not isinstance(__Bounds, numpy.ndarray): # Is an array
800         raise ValueError("Incorrect array definition of bounds data")
801     if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
802         raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
803     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
804         raise ValueError("Incorrectly shaped bounds data")
805     #
806     if __newClip:
807         __Vector = __Vector.clip(
808             __Bounds[:,0].reshape(__Vector.shape),
809             __Bounds[:,1].reshape(__Vector.shape),
810             )
811     else:
812         __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
813         __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
814         __Vector = numpy.asarray(__Vector)
815     #
816     return __Vector
817
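# Illustrative usage sketch, as comments only (x, bounds, xp are hypothetical):
# clip a point to 2-column bounds, None being first turned into +/- the largest
# representable float.
#
#   x      = numpy.array([ 2., -0.5, 3. ])
#   bounds = ForceNumericBounds( [[None, 1.], [0., 1.], [0., None]] )
#   xp     = ApplyBounds( x, bounds )   # -> [ 1., 0., 3. ]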
818 # ==============================================================================
819 def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
820     "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
821     #
822     Xf = EnsembleMean( __EnXf )
823     Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
824     Pf = (1 - __Betaf) * __B + __Betaf * Pf
825     #
826     selfB = PartialAlgorithm("3DVAR")
827     selfB._parameters["Minimizer"] = "LBFGSB"
828     selfB._parameters["MaximumNumberOfSteps"] = 15000
829     selfB._parameters["CostDecrementTolerance"] = 1.e-7
830     selfB._parameters["ProjectedGradientTolerance"] = -1
831     selfB._parameters["GradientNormTolerance"] = 1.e-05
832     selfB._parameters["StoreInternalVariables"] = False
833     selfB._parameters["optiprint"] = -1
834     selfB._parameters["optdisp"] = 0
835     selfB._parameters["Bounds"] = None
836     selfB._parameters["InitializationPoint"] = Xf
837     std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
838     Xa = selfB.get("Analysis")[-1].reshape((-1,1))
839     del selfB
840     #
841     return Xa + EnsembleOfAnomalies( __EnXn )
842
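# Note (sketch): the hybrid background covariance used above is
#   Pf_hybrid = (1 - Betaf) * B + Betaf * Pf(EnXf)
# and the returned ensemble is the 3DVAR analysis Xa plus the unchanged
# anomalies of __EnXn, a recentring that preserves the ensemble spread.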
843 # ==============================================================================
844 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
845     """
846     Constrained Unscented Kalman Filter
847     """
848     if selfA._parameters["EstimationOf"] == "Parameters":
849         selfA._parameters["StoreInternalVariables"] = True
850     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
851     #
852     L     = Xb.size
853     Alpha = selfA._parameters["Alpha"]
854     Beta  = selfA._parameters["Beta"]
855     if selfA._parameters["Kappa"] == 0:
856         if selfA._parameters["EstimationOf"] == "State":
857             Kappa = 0
858         elif selfA._parameters["EstimationOf"] == "Parameters":
859             Kappa = 3 - L
860     else:
861         Kappa = selfA._parameters["Kappa"]
862     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
863     Gamma  = math.sqrt( L + Lambda )
864     #
865     Ww = []
866     Ww.append( 0. )
867     for i in range(2*L):
868         Ww.append( 1. / (2.*(L + Lambda)) )
869     #
870     Wm = numpy.array( Ww )
871     Wm[0] = Lambda / (L + Lambda)
872     Wc = numpy.array( Ww )
873     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
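    # Note (sketch): with these definitions the mean weights sum to one,
    #   Wm[0] + 2*L/(2*(L + Lambda)) = Lambda/(L + Lambda) + L/(L + Lambda) = 1,
    # while Wc[0] adds the usual (1 - Alpha**2 + Beta) term of the unscented
    # transform.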
874     #
875     # Opérateurs
876     Hm = HO["Direct"].appliedControledFormTo
877     #
878     if selfA._parameters["EstimationOf"] == "State":
879         Mm = EM["Direct"].appliedControledFormTo
880     #
881     if CM is not None and "Tangent" in CM and U is not None:
882         Cm = CM["Tangent"].asMatrix(Xb)
883     else:
884         Cm = None
885     #
886     # Durée d'observation et tailles
887     if hasattr(Y,"stepnumber"):
888         duration = Y.stepnumber()
889         __p = numpy.cumprod(Y.shape())[-1]
890     else:
891         duration = 2
892         __p = numpy.array(Y).size
893     #
894     # Précalcul des inversions de B et R
895     if selfA._parameters["StoreInternalVariables"] \
896         or selfA._toStore("CostFunctionJ") \
897         or selfA._toStore("CostFunctionJb") \
898         or selfA._toStore("CostFunctionJo") \
899         or selfA._toStore("CurrentOptimum") \
900         or selfA._toStore("APosterioriCovariance"):
901         BI = B.getI()
902         RI = R.getI()
903     #
904     __n = Xb.size
905     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
906     #
907     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
908         Xn = Xb
909         if hasattr(B,"asfullmatrix"):
910             Pn = B.asfullmatrix(__n)
911         else:
912             Pn = B
913         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
914         selfA.StoredVariables["Analysis"].store( Xb )
915         if selfA._toStore("APosterioriCovariance"):
916             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
917     elif selfA._parameters["nextStep"]:
918         Xn = selfA._getInternalState("Xn")
919         Pn = selfA._getInternalState("Pn")
920     #
921     if selfA._parameters["EstimationOf"] == "Parameters":
922         XaMin            = Xn
923         previousJMinimum = numpy.finfo(float).max
924     #
925     for step in range(duration-1):
926         if hasattr(Y,"store"):
927             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
928         else:
929             Ynpu = numpy.ravel( Y ).reshape((__p,1))
930         #
931         if U is not None:
932             if hasattr(U,"store") and len(U)>1:
933                 Un = numpy.ravel( U[step] ).reshape((-1,1))
934             elif hasattr(U,"store") and len(U)==1:
935                 Un = numpy.ravel( U[0] ).reshape((-1,1))
936             else:
937                 Un = numpy.ravel( U ).reshape((-1,1))
938         else:
939             Un = None
940         #
941         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
942         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
943         nbSpts = 2*Xn.size+1
944         #
945         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
946             for point in range(nbSpts):
947                 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
948         #
949         XEtnnp = []
950         for point in range(nbSpts):
951             if selfA._parameters["EstimationOf"] == "State":
952                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
953                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
954                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
955                     XEtnnpi = XEtnnpi + Cm @ Un
956                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
957                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
958             elif selfA._parameters["EstimationOf"] == "Parameters":
959                 # --- > Par principe, M = Id, Q = 0
960                 XEtnnpi = Xnp[:,point]
961             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
962         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
963         #
964         Xncm = ( XEtnnp * Wm ).sum(axis=1)
965         #
966         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
967             Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
968         #
969         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
970         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
971         for point in range(nbSpts):
972             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
973         #
974         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
975             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
976         else:
977             Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
978         #
979         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
980         #
981         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
982             for point in range(nbSpts):
983                 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
984         #
985         Ynnp = []
986         for point in range(nbSpts):
987             if selfA._parameters["EstimationOf"] == "State":
988                 Ynnpi = Hm( (Xnnp[:,point], None) )
989             elif selfA._parameters["EstimationOf"] == "Parameters":
990                 Ynnpi = Hm( (Xnnp[:,point], Un) )
991             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
992         Ynnp = numpy.concatenate( Ynnp, axis=1 )
993         #
994         Yncm = ( Ynnp * Wm ).sum(axis=1)
995         #
996         Pyyn = R
997         Pxyn = 0.
998         for point in range(nbSpts):
999             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
1000             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
1001         #
1002         _Innovation  = Ynpu - Yncm.reshape((-1,1))
1003         if selfA._parameters["EstimationOf"] == "Parameters":
1004             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1005                 _Innovation = _Innovation - Cm @ Un
1006         #
1007         Kn = Pxyn * Pyyn.I
1008         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
1009         Pn = Pnm - Kn * Pyyn * Kn.T
1010         #
1011         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1012             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1013         #
1014         Xa = Xn # Pointeurs
1015         #--------------------------
1016         selfA._setInternalState("Xn", Xn)
1017         selfA._setInternalState("Pn", Pn)
1018         #--------------------------
1019         #
1020         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1021         # ---> avec analysis
1022         selfA.StoredVariables["Analysis"].store( Xa )
1023         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1024             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
1025         if selfA._toStore("InnovationAtCurrentAnalysis"):
1026             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1027         # ---> avec current state
1028         if selfA._parameters["StoreInternalVariables"] \
1029             or selfA._toStore("CurrentState"):
1030             selfA.StoredVariables["CurrentState"].store( Xn )
1031         if selfA._toStore("ForecastState"):
1032             selfA.StoredVariables["ForecastState"].store( Xncm )
1033         if selfA._toStore("ForecastCovariance"):
1034             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
1035         if selfA._toStore("BMA"):
1036             selfA.StoredVariables["BMA"].store( Xncm - Xa )
1037         if selfA._toStore("InnovationAtCurrentState"):
1038             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1039         if selfA._toStore("SimulatedObservationAtCurrentState") \
1040             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1041             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
1042         # ---> autres
1043         if selfA._parameters["StoreInternalVariables"] \
1044             or selfA._toStore("CostFunctionJ") \
1045             or selfA._toStore("CostFunctionJb") \
1046             or selfA._toStore("CostFunctionJo") \
1047             or selfA._toStore("CurrentOptimum") \
1048             or selfA._toStore("APosterioriCovariance"):
1049             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1050             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1051             J   = Jb + Jo
1052             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1053             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1054             selfA.StoredVariables["CostFunctionJ" ].store( J )
1055             #
1056             if selfA._toStore("IndexOfOptimum") \
1057                 or selfA._toStore("CurrentOptimum") \
1058                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1059                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1060                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1061                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1062                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1063             if selfA._toStore("IndexOfOptimum"):
1064                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1065             if selfA._toStore("CurrentOptimum"):
1066                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1067             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1068                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1069             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1070                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1071             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1072                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1073             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1074                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1075         if selfA._toStore("APosterioriCovariance"):
1076             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1077         if selfA._parameters["EstimationOf"] == "Parameters" \
1078             and J < previousJMinimum:
1079             previousJMinimum    = J
1080             XaMin               = Xa
1081             if selfA._toStore("APosterioriCovariance"):
1082                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1083     #
1084     # Additional final storage of the optimum in parameter estimation
1085     # ----------------------------------------------------------------------
1086     if selfA._parameters["EstimationOf"] == "Parameters":
1087         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1088         selfA.StoredVariables["Analysis"].store( XaMin )
1089         if selfA._toStore("APosterioriCovariance"):
1090             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1091         if selfA._toStore("BMA"):
1092             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1093     #
1094     return 0
1095
1096 # ==============================================================================
1097 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1098     """
1099     Constrained Extended Kalman Filter
1100     """
1101     if selfA._parameters["EstimationOf"] == "Parameters":
1102         selfA._parameters["StoreInternalVariables"] = True
1103     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1104     #
1105     # Opérateurs
1106     H = HO["Direct"].appliedControledFormTo
1107     #
1108     if selfA._parameters["EstimationOf"] == "State":
1109         M = EM["Direct"].appliedControledFormTo
1110     #
1111     if CM is not None and "Tangent" in CM and U is not None:
1112         Cm = CM["Tangent"].asMatrix(Xb)
1113     else:
1114         Cm = None
1115     #
1116     # Durée d'observation et tailles
1117     if hasattr(Y,"stepnumber"):
1118         duration = Y.stepnumber()
1119         __p = numpy.cumprod(Y.shape())[-1]
1120     else:
1121         duration = 2
1122         __p = numpy.array(Y).size
1123     #
1124     # Précalcul des inversions de B et R
1125     if selfA._parameters["StoreInternalVariables"] \
1126         or selfA._toStore("CostFunctionJ") \
1127         or selfA._toStore("CostFunctionJb") \
1128         or selfA._toStore("CostFunctionJo") \
1129         or selfA._toStore("CurrentOptimum") \
1130         or selfA._toStore("APosterioriCovariance"):
1131         BI = B.getI()
1132         RI = R.getI()
1133     #
1134     __n = Xb.size
1135     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1136     #
1137     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1138         Xn = Xb
1139         Pn = B
1140         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1141         selfA.StoredVariables["Analysis"].store( Xb )
1142         if selfA._toStore("APosterioriCovariance"):
1143             if hasattr(B,"asfullmatrix"):
1144                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1145             else:
1146                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1147         selfA._setInternalState("seed", numpy.random.get_state())
1148     elif selfA._parameters["nextStep"]:
1149         Xn = selfA._getInternalState("Xn")
1150         Pn = selfA._getInternalState("Pn")
1151     #
1152     if selfA._parameters["EstimationOf"] == "Parameters":
1153         XaMin            = Xn
1154         previousJMinimum = numpy.finfo(float).max
1155     #
1156     for step in range(duration-1):
1157         if hasattr(Y,"store"):
1158             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1159         else:
1160             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1161         #
1162         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1163         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1164         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1165         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1166         #
1167         if selfA._parameters["EstimationOf"] == "State":
1168             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1169             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1170             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1171             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1172         #
1173         if U is not None:
1174             if hasattr(U,"store") and len(U)>1:
1175                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1176             elif hasattr(U,"store") and len(U)==1:
1177                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1178             else:
1179                 Un = numpy.ravel( U ).reshape((-1,1))
1180         else:
1181             Un = None
1182         #
1183         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1184             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1185         #
1186         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1187             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1188             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
1189                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1190                 Xn_predicted = Xn_predicted + Cm @ Un
1191             Pn_predicted = Q + Mt * (Pn * Ma)
1192         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1193             # --- > By principle, M = Id, Q = 0
1194             Xn_predicted = Xn
1195             Pn_predicted = Pn
1196         #
1197         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1198             Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
1199         #
1200         if selfA._parameters["EstimationOf"] == "State":
1201             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1202             _Innovation  = Ynpu - HX_predicted
1203         elif selfA._parameters["EstimationOf"] == "Parameters":
1204             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1205             _Innovation  = Ynpu - HX_predicted
1206             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
1207                 _Innovation = _Innovation - Cm @ Un
1208         #
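        # Analysis step in standard Kalman filter form, with Ha the adjoint of Ht:
        #   K  = Pf Ha (Ht Pf Ha + R)^(-1)
        #   Xa = Xf + K * Innovation
        #   Pa = Pf - K Ht Pf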
1209         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1210         Xn = Xn_predicted + Kn * _Innovation
1211         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1212         #
1213         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1214             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1215         #
1216         Xa = Xn # Pointeurs
1217         #--------------------------
1218         selfA._setInternalState("Xn", Xn)
1219         selfA._setInternalState("Pn", Pn)
1220         #--------------------------
1221         #
1222         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1223         # ---> avec analysis
1224         selfA.StoredVariables["Analysis"].store( Xa )
1225         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1226             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1227         if selfA._toStore("InnovationAtCurrentAnalysis"):
1228             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1229         # ---> avec current state
1230         if selfA._parameters["StoreInternalVariables"] \
1231             or selfA._toStore("CurrentState"):
1232             selfA.StoredVariables["CurrentState"].store( Xn )
1233         if selfA._toStore("ForecastState"):
1234             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1235         if selfA._toStore("ForecastCovariance"):
1236             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1237         if selfA._toStore("BMA"):
1238             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1239         if selfA._toStore("InnovationAtCurrentState"):
1240             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1241         if selfA._toStore("SimulatedObservationAtCurrentState") \
1242             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1243             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1244         # ---> autres
1245         if selfA._parameters["StoreInternalVariables"] \
1246             or selfA._toStore("CostFunctionJ") \
1247             or selfA._toStore("CostFunctionJb") \
1248             or selfA._toStore("CostFunctionJo") \
1249             or selfA._toStore("CurrentOptimum") \
1250             or selfA._toStore("APosterioriCovariance"):
1251             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1252             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1253             J   = Jb + Jo
1254             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1255             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1256             selfA.StoredVariables["CostFunctionJ" ].store( J )
1257             #
1258             if selfA._toStore("IndexOfOptimum") \
1259                 or selfA._toStore("CurrentOptimum") \
1260                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1261                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1262                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1263                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1264                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1265             if selfA._toStore("IndexOfOptimum"):
1266                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1267             if selfA._toStore("CurrentOptimum"):
1268                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1269             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1270                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1271             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1272                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1273             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1274                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1275             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1276                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1277         if selfA._toStore("APosterioriCovariance"):
1278             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1279         if selfA._parameters["EstimationOf"] == "Parameters" \
1280             and J < previousJMinimum:
1281             previousJMinimum    = J
1282             XaMin               = Xa
1283             if selfA._toStore("APosterioriCovariance"):
1284                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1285     #
1286     # Additional final storage of the optimum in parameter estimation
1287     # ----------------------------------------------------------------------
1288     if selfA._parameters["EstimationOf"] == "Parameters":
1289         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1290         selfA.StoredVariables["Analysis"].store( XaMin )
1291         if selfA._toStore("APosterioriCovariance"):
1292             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1293         if selfA._toStore("BMA"):
1294             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1295     #
1296     return 0
1297
1298 # ==============================================================================
1299 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
1300     """
1301     Ensemble Kalman Smoother (EnKS), fixed-lag formulation
1302     """
1303     #
1304     # Opérateurs
1305     H = HO["Direct"].appliedControledFormTo
1306     #
1307     if selfA._parameters["EstimationOf"] == "State":
1308         M = EM["Direct"].appliedControledFormTo
1309     #
1310     if CM is not None and "Tangent" in CM and U is not None:
1311         Cm = CM["Tangent"].asMatrix(Xb)
1312     else:
1313         Cm = None
1314     #
1315     # Précalcul des inversions de B et R
1316     RIdemi = R.sqrtmI()
1317     #
1318     # Durée d'observation et tailles
1319     LagL = selfA._parameters["SmootherLagL"]
1320     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
1321         raise ValueError("Fixed-lag smoother requires a series of observation")
1322     if Y.stepnumber() < LagL:
1323         raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
1324     duration = Y.stepnumber()
1325     __p = numpy.cumprod(Y.shape())[-1]
1326     __n = Xb.size
1327     __m = selfA._parameters["NumberOfMembers"]
1328     #
1329     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1330         selfA.StoredVariables["Analysis"].store( Xb )
1331         if selfA._toStore("APosterioriCovariance"):
1332             if hasattr(B,"asfullmatrix"):
1333                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1334             else:
1335                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1336     #
1337     # Initial direct computation (memorization is preferred over recomputation)
1338     __seed = numpy.random.get_state()
1339     selfB = copy.deepcopy(selfA)
1340     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1341     if VariantM == "EnKS16-KalmanFilterFormula":
1342         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1343     else:
1344         raise ValueError("VariantM has to be chosen in the authorized methods list.")
1345     if LagL > 0:
1346         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1347     else:
1348         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1349     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1350     #
1351     for step in range(LagL,duration-1):
1352         #
1353         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1354         sEL.append(None)
1355         #
1356         if hasattr(Y,"store"):
1357             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1358         else:
1359             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1360         #
1361         if U is not None:
1362             if hasattr(U,"store") and len(U)>1:
1363                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1364             elif hasattr(U,"store") and len(U)==1:
1365                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1366             else:
1367                 Un = numpy.ravel( U ).reshape((-1,1))
1368         else:
1369             Un = None
1370         #
1371         #--------------------------
1372         if VariantM == "EnKS16-KalmanFilterFormula":
1373             if selfA._parameters["EstimationOf"] == "State": # Forecast
1374                 EL = M( [(EL[:,i], Un) for i in range(__m)],
1375                     argsAsSerie = True,
1376                     returnSerieAsArrayMatrix = True )
1377                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1378                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1379                     argsAsSerie = True,
1380                     returnSerieAsArrayMatrix = True )
1381                 if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
1382                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1383                     EZ = EZ + Cm @ Un
1384             elif selfA._parameters["EstimationOf"] == "Parameters":
1385                 # --- > By principle, M = Id, Q = 0
1386                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1387                     argsAsSerie = True,
1388                     returnSerieAsArrayMatrix = True )
1389             #
1390             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1391             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1392             #
1393             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1394             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1395             delta = RIdemi @ ( Ynpu - vZm )
1396             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1397             vw    = mT @ mS.T @ delta
1398             #
1399             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1400             mU    = numpy.identity(__m)
1401             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1402             #
1403             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1404             EL    = vEm + EX @ wTU
1405             #
1406             sEL[LagL] = EL
1407             for irl in range(LagL): # Smoothing of the L previous analyses
1408                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1409                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1410                 sEL[irl] = vEm + EX @ wTU
1411             #
1412             # Keep the retrospective analysis of order 0 before rotation
1413             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1414             if selfA._toStore("APosterioriCovariance"):
1415                 EXn = sEL[0]
1416             #
1417             for irl in range(LagL):
1418                 sEL[irl] = sEL[irl+1]
1419             sEL[LagL] = None
1420         #--------------------------
1421         else:
1422             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1423         #
1424         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1425         # ---> avec analysis
1426         selfA.StoredVariables["Analysis"].store( Xa )
1427         if selfA._toStore("APosterioriCovariance"):
1428             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1429     #
1430     # Storage of the last analyses, not yet completely updated
1431     for irl in range(LagL):
1432         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1433         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1434         selfA.StoredVariables["Analysis"].store( Xa )
1435     #
1436     return 0
1437
1438 # ==============================================================================
1439 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1440     VariantM="KalmanFilterFormula",
1441     Hybrid=None,
1442     ):
1443     """
1444     Ensemble Transform Kalman Filter (ETKF)
1445     """
1446     if selfA._parameters["EstimationOf"] == "Parameters":
1447         selfA._parameters["StoreInternalVariables"] = True
1448     #
1449     # Opérateurs
1450     H = HO["Direct"].appliedControledFormTo
1451     #
1452     if selfA._parameters["EstimationOf"] == "State":
1453         M = EM["Direct"].appliedControledFormTo
1454     #
1455     if CM is not None and "Tangent" in CM and U is not None:
1456         Cm = CM["Tangent"].asMatrix(Xb)
1457     else:
1458         Cm = None
1459     #
1460     # Durée d'observation et tailles
1461     if hasattr(Y,"stepnumber"):
1462         duration = Y.stepnumber()
1463         __p = numpy.cumprod(Y.shape())[-1]
1464     else:
1465         duration = 2
1466         __p = numpy.array(Y).size
1467     #
1468     # Précalcul des inversions de B et R
1469     if selfA._parameters["StoreInternalVariables"] \
1470         or selfA._toStore("CostFunctionJ") \
1471         or selfA._toStore("CostFunctionJb") \
1472         or selfA._toStore("CostFunctionJo") \
1473         or selfA._toStore("CurrentOptimum") \
1474         or selfA._toStore("APosterioriCovariance"):
1475         BI = B.getI()
1476         RI = R.getI()
1477     elif VariantM != "KalmanFilterFormula":
1478         RI = R.getI()
1479     if VariantM == "KalmanFilterFormula":
1480         RIdemi = R.sqrtmI()
1481     #
1482     __n = Xb.size
1483     __m = selfA._parameters["NumberOfMembers"]
1484     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1485     previousJMinimum = numpy.finfo(float).max
1486     #
1487     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1488         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1489         selfA.StoredVariables["Analysis"].store( Xb )
1490         if selfA._toStore("APosterioriCovariance"):
1491             if hasattr(B,"asfullmatrix"):
1492                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1493             else:
1494                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1495         selfA._setInternalState("seed", numpy.random.get_state())
1496     elif selfA._parameters["nextStep"]:
1497         Xn = selfA._getInternalState("Xn")
1498     #
1499     for step in range(duration-1):
1500         numpy.random.set_state(selfA._getInternalState("seed"))
1501         if hasattr(Y,"store"):
1502             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1503         else:
1504             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1505         #
1506         if U is not None:
1507             if hasattr(U,"store") and len(U)>1:
1508                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1509             elif hasattr(U,"store") and len(U)==1:
1510                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1511             else:
1512                 Un = numpy.ravel( U ).reshape((-1,1))
1513         else:
1514             Un = None
1515         #
1516         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1517             Xn = CovarianceInflation( Xn,
1518                 selfA._parameters["InflationType"],
1519                 selfA._parameters["InflationFactor"],
1520                 )
1521         #
1522         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1523             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1524                 argsAsSerie = True,
1525                 returnSerieAsArrayMatrix = True )
1526             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1527             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1528                 argsAsSerie = True,
1529                 returnSerieAsArrayMatrix = True )
1530             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
1531                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1532                 Xn_predicted = Xn_predicted + Cm @ Un
1533         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1534             # --- > By principle, M = Id, Q = 0
1535             Xn_predicted = EMX = Xn
1536             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1537                 argsAsSerie = True,
1538                 returnSerieAsArrayMatrix = True )
1539         #
1540         # Mean of forecast and observation of forecast
1541         Xfm  = EnsembleMean( Xn_predicted )
1542         Hfm  = EnsembleMean( HX_predicted )
1543         #
1544         # Anomalies
1545         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
1546         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
1547         #
1548         #--------------------------
1549         if VariantM == "KalmanFilterFormula":
1550             mS    = RIdemi * EaHX / math.sqrt(__m-1)
1551             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1552             delta = RIdemi * ( Ynpu - Hfm )
1553             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1554             vw    = mT @ mS.T @ delta
1555             #
1556             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1557             mU    = numpy.identity(__m)
1558             #
1559             EaX   = EaX / math.sqrt(__m-1)
1560             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1561         #--------------------------
1562         elif VariantM == "Variational":
1563             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
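            # Weight-space cost function minimized by conjugate gradient:
            #   J(w) = 0.5 (d - EaHX w)^T R^(-1) (d - EaHX w) + 0.5 (m-1) w^T w
            # with d = Ynpu - H(Xfm).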
1564             def CostFunction(w):
1565                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1566                 _Jo = 0.5 * _A.T @ (RI * _A)
1567                 _Jb = 0.5 * (__m-1) * w.T @ w
1568                 _J  = _Jo + _Jb
1569                 return float(_J)
1570             def GradientOfCostFunction(w):
1571                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1572                 _GradJo = - EaHX.T @ (RI * _A)
1573                 _GradJb = (__m-1) * w.reshape((__m,1))
1574                 _GradJ  = _GradJo + _GradJb
1575                 return numpy.ravel(_GradJ)
1576             vw = scipy.optimize.fmin_cg(
1577                 f           = CostFunction,
1578                 x0          = numpy.zeros(__m),
1579                 fprime      = GradientOfCostFunction,
1580                 args        = (),
1581                 disp        = False,
1582                 )
1583             #
1584             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1585             Htb = (__m-1) * numpy.identity(__m)
1586             Hta = Hto + Htb
1587             #
1588             Pta = numpy.linalg.inv( Hta )
1589             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1590             #
1591             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1592         #--------------------------
1593         elif VariantM == "FiniteSize11": # Jauge Boc2011
1594             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
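            # "Finite-size" variant: the quadratic background term of the
            # weight-space cost function is replaced by the logarithmic gauge
            #   Jb(w) = 0.5 m log(1 + 1/m + w^T w)
            # (Boc2011 gauge), the observation term being unchanged.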
1595             def CostFunction(w):
1596                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1597                 _Jo = 0.5 * _A.T @ (RI * _A)
1598                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1599                 _J  = _Jo + _Jb
1600                 return float(_J)
1601             def GradientOfCostFunction(w):
1602                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1603                 _GradJo = - EaHX.T @ (RI * _A)
1604                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1605                 _GradJ  = _GradJo + _GradJb
1606                 return numpy.ravel(_GradJ)
1607             vw = scipy.optimize.fmin_cg(
1608                 f           = CostFunction,
1609                 x0          = numpy.zeros(__m),
1610                 fprime      = GradientOfCostFunction,
1611                 args        = (),
1612                 disp        = False,
1613                 )
1614             #
1615             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1616             Htb = __m * \
1617                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1618                 / (1 + 1/__m + vw.T @ vw)**2
1619             Hta = Hto + Htb
1620             #
1621             Pta = numpy.linalg.inv( Hta )
1622             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1623             #
1624             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1625         #--------------------------
1626         elif VariantM == "FiniteSize15": # Jauge Boc2015
1627             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
1628             def CostFunction(w):
1629                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1630                 _Jo = 0.5 * _A.T @ (RI * _A)
1631                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1632                 _J  = _Jo + _Jb
1633                 return float(_J)
1634             def GradientOfCostFunction(w):
1635                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1636                 _GradJo = - EaHX.T @ (RI * _A)
1637                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1638                 _GradJ  = _GradJo + _GradJb
1639                 return numpy.ravel(_GradJ)
1640             vw = scipy.optimize.fmin_cg(
1641                 f           = CostFunction,
1642                 x0          = numpy.zeros(__m),
1643                 fprime      = GradientOfCostFunction,
1644                 args        = (),
1645                 disp        = False,
1646                 )
1647             #
1648             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1649             Htb = (__m+1) * \
1650                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1651                 / (1 + 1/__m + vw.T @ vw)**2
1652             Hta = Hto + Htb
1653             #
1654             Pta = numpy.linalg.inv( Hta )
1655             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1656             #
1657             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1658         #--------------------------
1659         elif VariantM == "FiniteSize16": # Jauge Boc2016
1660             HXfm = H((Xfm[:,None], Un)) # Alternatively Hfm
1661             def CostFunction(w):
1662                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1663                 _Jo = 0.5 * _A.T @ (RI * _A)
1664                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1665                 _J  = _Jo + _Jb
1666                 return float(_J)
1667             def GradientOfCostFunction(w):
1668                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1669                 _GradJo = - EaHX.T @ (RI * _A)
1670                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1671                 _GradJ  = _GradJo + _GradJb
1672                 return numpy.ravel(_GradJ)
1673             vw = scipy.optimize.fmin_cg(
1674                 f           = CostFunction,
1675                 x0          = numpy.zeros(__m),
1676                 fprime      = GradientOfCostFunction,
1677                 args        = (),
1678                 disp        = False,
1679                 )
1680             #
1681             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1682             Htb = ((__m+1) / (__m-1)) * \
1683                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1684                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1685             Hta = Hto + Htb
1686             #
1687             Pta = numpy.linalg.inv( Hta )
1688             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
1689             #
1690             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1691         #--------------------------
1692         else:
1693             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1694         #
1695         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1696             Xn = CovarianceInflation( Xn,
1697                 selfA._parameters["InflationType"],
1698                 selfA._parameters["InflationFactor"],
1699                 )
1700         #
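        # Optional hybrid step: the analysed ensemble is recentred using a
        # 3DVAR analysis, weighted by the "HybridCovarianceEquilibrium" factor.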
1701         if Hybrid == "E3DVAR":
1702             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1703             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1704         #
1705         Xa = EnsembleMean( Xn )
1706         #--------------------------
1707         selfA._setInternalState("Xn", Xn)
1708         selfA._setInternalState("seed", numpy.random.get_state())
1709         #--------------------------
1710         #
1711         if selfA._parameters["StoreInternalVariables"] \
1712             or selfA._toStore("CostFunctionJ") \
1713             or selfA._toStore("CostFunctionJb") \
1714             or selfA._toStore("CostFunctionJo") \
1715             or selfA._toStore("APosterioriCovariance") \
1716             or selfA._toStore("InnovationAtCurrentAnalysis") \
1717             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1718             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1719             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1720             _Innovation = Ynpu - _HXa
1721         #
1722         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1723         # ---> avec analysis
1724         selfA.StoredVariables["Analysis"].store( Xa )
1725         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1726             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1727         if selfA._toStore("InnovationAtCurrentAnalysis"):
1728             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1729         # ---> avec current state
1730         if selfA._parameters["StoreInternalVariables"] \
1731             or selfA._toStore("CurrentState"):
1732             selfA.StoredVariables["CurrentState"].store( Xn )
1733         if selfA._toStore("ForecastState"):
1734             selfA.StoredVariables["ForecastState"].store( EMX )
1735         if selfA._toStore("ForecastCovariance"):
1736             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1737         if selfA._toStore("BMA"):
1738             selfA.StoredVariables["BMA"].store( EMX - Xa )
1739         if selfA._toStore("InnovationAtCurrentState"):
1740             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1741         if selfA._toStore("SimulatedObservationAtCurrentState") \
1742             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1743             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1744         # ---> autres
1745         if selfA._parameters["StoreInternalVariables"] \
1746             or selfA._toStore("CostFunctionJ") \
1747             or selfA._toStore("CostFunctionJb") \
1748             or selfA._toStore("CostFunctionJo") \
1749             or selfA._toStore("CurrentOptimum") \
1750             or selfA._toStore("APosterioriCovariance"):
1751             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1752             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1753             J   = Jb + Jo
1754             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1755             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1756             selfA.StoredVariables["CostFunctionJ" ].store( J )
1757             #
1758             if selfA._toStore("IndexOfOptimum") \
1759                 or selfA._toStore("CurrentOptimum") \
1760                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1761                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1762                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1763                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1764                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1765             if selfA._toStore("IndexOfOptimum"):
1766                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1767             if selfA._toStore("CurrentOptimum"):
1768                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1769             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1770                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1771             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1772                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1773             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1774                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1775             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1776                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1777         if selfA._toStore("APosterioriCovariance"):
1778             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1779         if selfA._parameters["EstimationOf"] == "Parameters" \
1780             and J < previousJMinimum:
1781             previousJMinimum    = J
1782             XaMin               = Xa
1783             if selfA._toStore("APosterioriCovariance"):
1784                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1785         # ---> For the smoothers
1786         if selfA._toStore("CurrentEnsembleState"):
1787             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1788     #
1789     # Additional final storage of the optimum in parameter estimation
1790     # ----------------------------------------------------------------------
1791     if selfA._parameters["EstimationOf"] == "Parameters":
1792         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1793         selfA.StoredVariables["Analysis"].store( XaMin )
1794         if selfA._toStore("APosterioriCovariance"):
1795             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1796         if selfA._toStore("BMA"):
1797             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1798     #
1799     return 0
1800
1801 # ==============================================================================
1802 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1803     """
1804     Extended Kalman Filter
1805     """
1806     if selfA._parameters["EstimationOf"] == "Parameters":
1807         selfA._parameters["StoreInternalVariables"] = True
1808     #
1809     # Opérateurs
1810     H = HO["Direct"].appliedControledFormTo
1811     #
1812     if selfA._parameters["EstimationOf"] == "State":
1813         M = EM["Direct"].appliedControledFormTo
1814     #
1815     if CM is not None and "Tangent" in CM and U is not None:
1816         Cm = CM["Tangent"].asMatrix(Xb)
1817     else:
1818         Cm = None
1819     #
1820     # Durée d'observation et tailles
1821     if hasattr(Y,"stepnumber"):
1822         duration = Y.stepnumber()
1823         __p = numpy.cumprod(Y.shape())[-1]
1824     else:
1825         duration = 2
1826         __p = numpy.array(Y).size
1827     #
1828     # Précalcul des inversions de B et R
1829     if selfA._parameters["StoreInternalVariables"] \
1830         or selfA._toStore("CostFunctionJ") \
1831         or selfA._toStore("CostFunctionJb") \
1832         or selfA._toStore("CostFunctionJo") \
1833         or selfA._toStore("CurrentOptimum") \
1834         or selfA._toStore("APosterioriCovariance"):
1835         BI = B.getI()
1836         RI = R.getI()
1837     #
1838     __n = Xb.size
1839     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1840     #
1841     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1842         Xn = Xb
1843         Pn = B
1844         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1845         selfA.StoredVariables["Analysis"].store( Xb )
1846         if selfA._toStore("APosterioriCovariance"):
1847             if hasattr(B,"asfullmatrix"):
1848                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1849             else:
1850                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1851         selfA._setInternalState("seed", numpy.random.get_state())
1852     elif selfA._parameters["nextStep"]:
1853         Xn = selfA._getInternalState("Xn")
1854         Pn = selfA._getInternalState("Pn")
1855     #
1856     if selfA._parameters["EstimationOf"] == "Parameters":
1857         XaMin            = Xn
1858         previousJMinimum = numpy.finfo(float).max
1859     #
1860     for step in range(duration-1):
1861         if hasattr(Y,"store"):
1862             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1863         else:
1864             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1865         #
1866         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1867         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1868         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1869         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1870         #
1871         if selfA._parameters["EstimationOf"] == "State":
1872             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1873             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1874             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1875             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1876         #
1877         if U is not None:
1878             if hasattr(U,"store") and len(U)>1:
1879                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1880             elif hasattr(U,"store") and len(U)==1:
1881                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1882             else:
1883                 Un = numpy.ravel( U ).reshape((-1,1))
1884         else:
1885             Un = None
1886         #
1887         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1888             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1889             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
1890                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1891                 Xn_predicted = Xn_predicted + Cm @ Un
1892             Pn_predicted = Q + Mt * (Pn * Ma)
1893         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1894             # --- > By principle, M = Id, Q = 0
1895             Xn_predicted = Xn
1896             Pn_predicted = Pn
1897         #
1898         if selfA._parameters["EstimationOf"] == "State":
1899             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1900             _Innovation  = Ynpu - HX_predicted
1901         elif selfA._parameters["EstimationOf"] == "Parameters":
1902             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1903             _Innovation  = Ynpu - HX_predicted
1904             if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is applied twice!
1905                 _Innovation = _Innovation - Cm @ Un
1906         #
1907         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1908         Xn = Xn_predicted + Kn * _Innovation
1909         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1910         #
1911         Xa = Xn # Pointeurs
1912         #--------------------------
1913         selfA._setInternalState("Xn", Xn)
1914         selfA._setInternalState("Pn", Pn)
1915         #--------------------------
1916         #
1917         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1918         # ---> avec analysis
1919         selfA.StoredVariables["Analysis"].store( Xa )
1920         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1921             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1922         if selfA._toStore("InnovationAtCurrentAnalysis"):
1923             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1924         # ---> avec current state
1925         if selfA._parameters["StoreInternalVariables"] \
1926             or selfA._toStore("CurrentState"):
1927             selfA.StoredVariables["CurrentState"].store( Xn )
1928         if selfA._toStore("ForecastState"):
1929             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1930         if selfA._toStore("ForecastCovariance"):
1931             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1932         if selfA._toStore("BMA"):
1933             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1934         if selfA._toStore("InnovationAtCurrentState"):
1935             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1936         if selfA._toStore("SimulatedObservationAtCurrentState") \
1937             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1938             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1939         # ---> autres
1940         if selfA._parameters["StoreInternalVariables"] \
1941             or selfA._toStore("CostFunctionJ") \
1942             or selfA._toStore("CostFunctionJb") \
1943             or selfA._toStore("CostFunctionJo") \
1944             or selfA._toStore("CurrentOptimum") \
1945             or selfA._toStore("APosterioriCovariance"):
1946             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1947             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1948             J   = Jb + Jo
1949             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1950             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1951             selfA.StoredVariables["CostFunctionJ" ].store( J )
1952             #
1953             if selfA._toStore("IndexOfOptimum") \
1954                 or selfA._toStore("CurrentOptimum") \
1955                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1956                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1957                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1958                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1959                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1960             if selfA._toStore("IndexOfOptimum"):
1961                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1962             if selfA._toStore("CurrentOptimum"):
1963                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1964             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1965                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1966             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1967                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1968             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1969                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1970             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1971                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1972         if selfA._toStore("APosterioriCovariance"):
1973             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1974         if selfA._parameters["EstimationOf"] == "Parameters" \
1975             and J < previousJMinimum:
1976             previousJMinimum    = J
1977             XaMin               = Xa
1978             if selfA._toStore("APosterioriCovariance"):
1979                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1980     #
1981     # Additional final storage of the optimum in parameter estimation
1982     # ----------------------------------------------------------------------
1983     if selfA._parameters["EstimationOf"] == "Parameters":
1984         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1985         selfA.StoredVariables["Analysis"].store( XaMin )
1986         if selfA._toStore("APosterioriCovariance"):
1987             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1988         if selfA._toStore("BMA"):
1989             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1990     #
1991     return 0
1992
1993 # ==============================================================================
1994 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1995     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1996     """
1997     Iterative Ensemble Kalman Filter (IEnKF)
1998     """
1999     if selfA._parameters["EstimationOf"] == "Parameters":
2000         selfA._parameters["StoreInternalVariables"] = True
2001     #
2002     # Opérateurs
2003     H = HO["Direct"].appliedControledFormTo
2004     #
2005     if selfA._parameters["EstimationOf"] == "State":
2006         M = EM["Direct"].appliedControledFormTo
2007     #
2008     if CM is not None and "Tangent" in CM and U is not None:
2009         Cm = CM["Tangent"].asMatrix(Xb)
2010     else:
2011         Cm = None
2012     #
2013     # Durée d'observation et tailles
2014     if hasattr(Y,"stepnumber"):
2015         duration = Y.stepnumber()
2016         __p = numpy.cumprod(Y.shape())[-1]
2017     else:
2018         duration = 2
2019         __p = numpy.array(Y).size
2020     #
2021     # Précalcul des inversions de B et R
2022     if selfA._parameters["StoreInternalVariables"] \
2023         or selfA._toStore("CostFunctionJ") \
2024         or selfA._toStore("CostFunctionJb") \
2025         or selfA._toStore("CostFunctionJo") \
2026         or selfA._toStore("CurrentOptimum") \
2027         or selfA._toStore("APosterioriCovariance"):
2028         BI = B.getI()
2029     RI = R.getI()
2030     #
2031     __n = Xb.size
2032     __m = selfA._parameters["NumberOfMembers"]
2033     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
2034     previousJMinimum = numpy.finfo(float).max
2035     #
2036     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2037         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2038         else:                         Pn = B
2039         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2040         selfA.StoredVariables["Analysis"].store( Xb )
2041         if selfA._toStore("APosterioriCovariance"):
2042             if hasattr(B,"asfullmatrix"):
2043                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2044             else:
2045                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2046         selfA._setInternalState("seed", numpy.random.get_state())
2047     elif selfA._parameters["nextStep"]:
2048         Xn = selfA._getInternalState("Xn")
2049     #
2050     for step in range(duration-1):
2051         numpy.random.set_state(selfA._getInternalState("seed"))
2052         if hasattr(Y,"store"):
2053             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2054         else:
2055             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2056         #
2057         if U is not None:
2058             if hasattr(U,"store") and len(U)>1:
2059                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2060             elif hasattr(U,"store") and len(U)==1:
2061                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2062             else:
2063                 Un = numpy.ravel( U ).reshape((-1,1))
2064         else:
2065             Un = None
2066         #
2067         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2068             Xn = CovarianceInflation( Xn,
2069                 selfA._parameters["InflationType"],
2070                 selfA._parameters["InflationFactor"],
2071                 )
2072         #
2073         #--------------------------
2074         if VariantM == "IEnKF12":
2075             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2076             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2077             __j = 0
2078             Deltaw = 1
2079             if not BnotT:
2080                 Ta  = numpy.identity(__m)
2081             vw  = numpy.zeros(__m)
2082             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2083                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2084                 #
2085                 if BnotT:
2086                     E1 = vx1 + _epsilon * EaX
2087                 else:
2088                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2089                 #
2090                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2091                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2092                         argsAsSerie = True,
2093                         returnSerieAsArrayMatrix = True )
2094                 elif selfA._parameters["EstimationOf"] == "Parameters":
2095                     # --- > By principle, M = Id
2096                     E2 = Xn
2097                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2098                 vy1 = H((vx2, Un)).reshape((__p,1))
2099                 #
2100                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2101                     argsAsSerie = True,
2102                     returnSerieAsArrayMatrix = True )
2103                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2104                 #
2105                 if BnotT:
2106                     EaY = (HE2 - vy2) / _epsilon
2107                 else:
2108                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2109                 #
2110                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2111                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2112                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2113                 #
2114                 vw = vw + Deltaw
2115                 #
2116                 if not BnotT:
2117                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2118                 #
2119                 __j = __j + 1
2120             #
2121             A2 = EnsembleOfAnomalies( E2 )
2122             #
2123             if BnotT:
2124                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2125                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2126             #
2127             Xn = vx2 + A2
2128         #--------------------------
2129         else:
2130             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2131         #
2132         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2133             Xn = CovarianceInflation( Xn,
2134                 selfA._parameters["InflationType"],
2135                 selfA._parameters["InflationFactor"],
2136                 )
2137         #
2138         Xa = EnsembleMean( Xn )
2139         #--------------------------
2140         selfA._setInternalState("Xn", Xn)
2141         selfA._setInternalState("seed", numpy.random.get_state())
2142         #--------------------------
2143         #
2144         if selfA._parameters["StoreInternalVariables"] \
2145             or selfA._toStore("CostFunctionJ") \
2146             or selfA._toStore("CostFunctionJb") \
2147             or selfA._toStore("CostFunctionJo") \
2148             or selfA._toStore("APosterioriCovariance") \
2149             or selfA._toStore("InnovationAtCurrentAnalysis") \
2150             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2151             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2152             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2153             _Innovation = Ynpu - _HXa
2154         #
2155         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2156         # ---> avec analysis
2157         selfA.StoredVariables["Analysis"].store( Xa )
2158         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2159             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2160         if selfA._toStore("InnovationAtCurrentAnalysis"):
2161             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2162         # ---> avec current state
2163         if selfA._parameters["StoreInternalVariables"] \
2164             or selfA._toStore("CurrentState"):
2165             selfA.StoredVariables["CurrentState"].store( Xn )
2166         if selfA._toStore("ForecastState"):
2167             selfA.StoredVariables["ForecastState"].store( E2 )
2168         if selfA._toStore("ForecastCovariance"):
2169             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2170         if selfA._toStore("BMA"):
2171             selfA.StoredVariables["BMA"].store( E2 - Xa )
2172         if selfA._toStore("InnovationAtCurrentState"):
2173             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2174         if selfA._toStore("SimulatedObservationAtCurrentState") \
2175             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2176             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2177         # ---> autres
2178         if selfA._parameters["StoreInternalVariables"] \
2179             or selfA._toStore("CostFunctionJ") \
2180             or selfA._toStore("CostFunctionJb") \
2181             or selfA._toStore("CostFunctionJo") \
2182             or selfA._toStore("CurrentOptimum") \
2183             or selfA._toStore("APosterioriCovariance"):
2184             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2185             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2186             J   = Jb + Jo
2187             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2188             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2189             selfA.StoredVariables["CostFunctionJ" ].store( J )
2190             #
2191             if selfA._toStore("IndexOfOptimum") \
2192                 or selfA._toStore("CurrentOptimum") \
2193                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2194                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2195                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2196                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2197                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2198             if selfA._toStore("IndexOfOptimum"):
2199                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2200             if selfA._toStore("CurrentOptimum"):
2201                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2202             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2203                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2204             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2205                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2206             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2207                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2208             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2209                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2210         if selfA._toStore("APosterioriCovariance"):
2211             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2212         if selfA._parameters["EstimationOf"] == "Parameters" \
2213             and J < previousJMinimum:
2214             previousJMinimum    = J
2215             XaMin               = Xa
2216             if selfA._toStore("APosterioriCovariance"):
2217                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2218         # ---> Pour les smoothers
2219         if selfA._toStore("CurrentEnsembleState"):
2220             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2221     #
2222     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2223     # ----------------------------------------------------------------------
2224     if selfA._parameters["EstimationOf"] == "Parameters":
2225         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2226         selfA.StoredVariables["Analysis"].store( XaMin )
2227         if selfA._toStore("APosterioriCovariance"):
2228             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2229         if selfA._toStore("BMA"):
2230             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2231     #
2232     return 0
2233
2234 # ==============================================================================
2235 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2236     """
2237     Incremental 3DVAR
2238     """
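    # Note (editorial sketch, not executed): each pass of the outer loop below
    # minimizes a quadratic cost on the increment dx around the current point Xr,
    # using the tangent linear operator Ht of H evaluated at Xr:
    #     J(dx) = 0.5 * dx.T @ BI @ dx + 0.5 * (Innovation - Ht @ dx).T @ RI @ (Innovation - Ht @ dx)
    # where Innovation = Y - H(Xb). The names BI, RI, Ht and Innovation refer to
    # the variables defined below; the formula is only a reading aid.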
2239     #
2240     # Initialisations
2241     # ---------------
2242     Hm = HO["Direct"].appliedTo
2243     #
2244     BI = B.getI()
2245     RI = R.getI()
2246     #
2247     Xini = selfA._parameters["InitializationPoint"]
2248     #
2249     HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
2250     Innovation = Y - HXb
2251     #
2252     # Outer Loop
2253     # ----------
2254     iOuter = 0
2255     J      = 1./mpr
2256     DeltaJ = 1./mpr
2257     Xr     = Xini.reshape((-1,1))
2258     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2259         #
2260         # Inner Loop
2261         # ----------
2262         Ht = HO["Tangent"].asMatrix(Xr)
2263         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2264         #
2265         # Définition de la fonction-coût
2266         # ------------------------------
2267         def CostFunction(dx):
2268             _dX  = numpy.ravel( dx ).reshape((-1,1))
2269             if selfA._parameters["StoreInternalVariables"] or \
2270                 selfA._toStore("CurrentState") or \
2271                 selfA._toStore("CurrentOptimum"):
2272                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2273             _HdX = Ht @ _dX
2274             _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2275             _dInnovation = Innovation - _HdX
2276             if selfA._toStore("SimulatedObservationAtCurrentState") or \
2277                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2278                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2279             if selfA._toStore("InnovationAtCurrentState"):
2280                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2281             #
2282             Jb  = float( 0.5 * _dX.T * (BI * _dX) )
2283             Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2284             J   = Jb + Jo
2285             #
2286             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2287             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2288             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2289             selfA.StoredVariables["CostFunctionJ" ].store( J )
2290             if selfA._toStore("IndexOfOptimum") or \
2291                 selfA._toStore("CurrentOptimum") or \
2292                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2293                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2294                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2295                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2296                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2297             if selfA._toStore("IndexOfOptimum"):
2298                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2299             if selfA._toStore("CurrentOptimum"):
2300                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2301             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2302                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2303             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2304                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2305             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2306                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2307             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2308                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2309             return J
2310         #
2311         def GradientOfCostFunction(dx):
2312             _dX          = numpy.ravel( dx )
2313             _HdX         = Ht @ _dX
2314             _HdX         = numpy.ravel( _HdX ).reshape((-1,1))
2315             _dInnovation = Innovation - _HdX
2316             GradJb       = BI @ _dX
2317             GradJo       = - Ht.T @ (RI * _dInnovation)
2318             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2319             return GradJ
2320         #
2321         # Minimisation de la fonctionnelle
2322         # --------------------------------
2323         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2324         #
2325         if selfA._parameters["Minimizer"] == "LBFGSB":
2326             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2327             if "0.19" <= scipy.version.version <= "1.1.0":
2328                 import lbfgsbhlt as optimiseur
2329             else:
2330                 import scipy.optimize as optimiseur
2331             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2332                 func        = CostFunction,
2333                 x0          = numpy.zeros(Xini.size),
2334                 fprime      = GradientOfCostFunction,
2335                 args        = (),
2336                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2337                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2338                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2339                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2340                 iprint      = selfA._parameters["optiprint"],
2341                 )
2342             nfeval = Informations['funcalls']
2343             rc     = Informations['warnflag']
2344         elif selfA._parameters["Minimizer"] == "TNC":
2345             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2346                 func        = CostFunction,
2347                 x0          = numpy.zeros(Xini.size),
2348                 fprime      = GradientOfCostFunction,
2349                 args        = (),
2350                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2351                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2352                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2353                 ftol        = selfA._parameters["CostDecrementTolerance"],
2354                 messages    = selfA._parameters["optmessages"],
2355                 )
2356         elif selfA._parameters["Minimizer"] == "CG":
2357             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2358                 f           = CostFunction,
2359                 x0          = numpy.zeros(Xini.size),
2360                 fprime      = GradientOfCostFunction,
2361                 args        = (),
2362                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2363                 gtol        = selfA._parameters["GradientNormTolerance"],
2364                 disp        = selfA._parameters["optdisp"],
2365                 full_output = True,
2366                 )
2367         elif selfA._parameters["Minimizer"] == "NCG":
2368             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2369                 f           = CostFunction,
2370                 x0          = numpy.zeros(Xini.size),
2371                 fprime      = GradientOfCostFunction,
2372                 args        = (),
2373                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2374                 avextol     = selfA._parameters["CostDecrementTolerance"],
2375                 disp        = selfA._parameters["optdisp"],
2376                 full_output = True,
2377                 )
2378         elif selfA._parameters["Minimizer"] == "BFGS":
2379             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2380                 f           = CostFunction,
2381                 x0          = numpy.zeros(Xini.size),
2382                 fprime      = GradientOfCostFunction,
2383                 args        = (),
2384                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2385                 gtol        = selfA._parameters["GradientNormTolerance"],
2386                 disp        = selfA._parameters["optdisp"],
2387                 full_output = True,
2388                 )
2389         else:
2390             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2391         #
2392         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2393         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2394         #
2395         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2396             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2397         else:
2398             Minimum = Xb + Minimum.reshape((-1,1))
2399         #
2400         Xr     = Minimum
2401         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J ; J = selfA.StoredVariables["CostFunctionJ" ][-1] # Update J so that the outer-loop decrement test compares successive costs
2402         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2403     #
2404     Xa = Xr
2405     #--------------------------
2406     #
2407     selfA.StoredVariables["Analysis"].store( Xa )
2408     #
2409     if selfA._toStore("OMA") or \
2410         selfA._toStore("SigmaObs2") or \
2411         selfA._toStore("SimulationQuantiles") or \
2412         selfA._toStore("SimulatedObservationAtOptimum"):
2413         if selfA._toStore("SimulatedObservationAtCurrentState"):
2414             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2415         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2416             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2417         else:
2418             HXa = Hm( Xa )
2419     #
2420     if selfA._toStore("APosterioriCovariance") or \
2421         selfA._toStore("SimulationQuantiles") or \
2422         selfA._toStore("JacobianMatrixAtOptimum") or \
2423         selfA._toStore("KalmanGainAtOptimum"):
2424         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2425         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2426     if selfA._toStore("APosterioriCovariance") or \
2427         selfA._toStore("SimulationQuantiles") or \
2428         selfA._toStore("KalmanGainAtOptimum"):
2429         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2430         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2431     if selfA._toStore("APosterioriCovariance") or \
2432         selfA._toStore("SimulationQuantiles"):
2433         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2434     if selfA._toStore("APosterioriCovariance"):
2435         selfA.StoredVariables["APosterioriCovariance"].store( A )
2436     if selfA._toStore("JacobianMatrixAtOptimum"):
2437         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2438     if selfA._toStore("KalmanGainAtOptimum"):
2439         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2440         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2441         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2442     #
2443     # Calculs et/ou stockages supplémentaires
2444     # ---------------------------------------
2445     if selfA._toStore("Innovation") or \
2446         selfA._toStore("SigmaObs2") or \
2447         selfA._toStore("MahalanobisConsistency") or \
2448         selfA._toStore("OMB"):
2449         d  = Y - HXb
2450     if selfA._toStore("Innovation"):
2451         selfA.StoredVariables["Innovation"].store( d )
2452     if selfA._toStore("BMA"):
2453         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2454     if selfA._toStore("OMA"):
2455         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2456     if selfA._toStore("OMB"):
2457         selfA.StoredVariables["OMB"].store( d )
2458     if selfA._toStore("SigmaObs2"):
2459         TraceR = R.trace(Y.size)
2460         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2461     if selfA._toStore("MahalanobisConsistency"):
2462         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2463     if selfA._toStore("SimulationQuantiles"):
2464         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2465     if selfA._toStore("SimulatedObservationAtBackground"):
2466         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2467     if selfA._toStore("SimulatedObservationAtOptimum"):
2468         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2469     #
2470     return 0
2471
2472 # ==============================================================================
2473 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2474     VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2475     Hybrid=None,
2476     ):
2477     """
2478     Maximum Likelihood Ensemble Filter
2479     """
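    # Note (editorial sketch, not executed): for the "MLEF13" variant below, the
    # analysis is obtained iteratively in ensemble coordinates. With EaX the
    # normalized state anomalies and EaY the corresponding observation anomalies,
    # each iteration approximately solves
    #     (I + EaY.T @ RI @ EaY) @ Deltaw = -(vw - EaY.T @ RI @ (Ynpu - vy2))
    # and updates the weight vector vw; the analysis mean is then vx1 = Xfm + EaX @ vw.
    # These names mirror the variables of the iteration loop further down.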
2480     if selfA._parameters["EstimationOf"] == "Parameters":
2481         selfA._parameters["StoreInternalVariables"] = True
2482     #
2483     # Opérateurs
2484     H = HO["Direct"].appliedControledFormTo
2485     #
2486     if selfA._parameters["EstimationOf"] == "State":
2487         M = EM["Direct"].appliedControledFormTo
2488     #
2489     if CM is not None and "Tangent" in CM and U is not None:
2490         Cm = CM["Tangent"].asMatrix(Xb)
2491     else:
2492         Cm = None
2493     #
2494     # Durée d'observation et tailles
2495     if hasattr(Y,"stepnumber"):
2496         duration = Y.stepnumber()
2497         __p = numpy.cumprod(Y.shape())[-1]
2498     else:
2499         duration = 2
2500         __p = numpy.array(Y).size
2501     #
2502     # Précalcul des inversions de B et R
2503     if selfA._parameters["StoreInternalVariables"] \
2504         or selfA._toStore("CostFunctionJ") \
2505         or selfA._toStore("CostFunctionJb") \
2506         or selfA._toStore("CostFunctionJo") \
2507         or selfA._toStore("CurrentOptimum") \
2508         or selfA._toStore("APosterioriCovariance"):
2509         BI = B.getI()
2510     RI = R.getI()
2511     #
2512     __n = Xb.size
2513     __m = selfA._parameters["NumberOfMembers"]
2514     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
2515     previousJMinimum = numpy.finfo(float).max
2516     #
2517     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2518         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2519         selfA.StoredVariables["Analysis"].store( Xb )
2520         if selfA._toStore("APosterioriCovariance"):
2521             if hasattr(B,"asfullmatrix"):
2522                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2523             else:
2524                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2525         selfA._setInternalState("seed", numpy.random.get_state())
2526     elif selfA._parameters["nextStep"]:
2527         Xn = selfA._getInternalState("Xn")
2528     #
2529     for step in range(duration-1):
2530         numpy.random.set_state(selfA._getInternalState("seed"))
2531         if hasattr(Y,"store"):
2532             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2533         else:
2534             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2535         #
2536         if U is not None:
2537             if hasattr(U,"store") and len(U)>1:
2538                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2539             elif hasattr(U,"store") and len(U)==1:
2540                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2541             else:
2542                 Un = numpy.ravel( U ).reshape((-1,1))
2543         else:
2544             Un = None
2545         #
2546         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2547             Xn = CovarianceInflation( Xn,
2548                 selfA._parameters["InflationType"],
2549                 selfA._parameters["InflationFactor"],
2550                 )
2551         #
2552         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2553             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2554                 argsAsSerie = True,
2555                 returnSerieAsArrayMatrix = True )
2556             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2557             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2558                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2559                 Xn_predicted = Xn_predicted + Cm @ Un
2560         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2561             # --- > Par principe, M = Id, Q = 0
2562             Xn_predicted = EMX = Xn
2563         #
2564         #--------------------------
2565         if VariantM == "MLEF13":
2566             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2567             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2568             Ua  = numpy.identity(__m)
2569             __j = 0
2570             Deltaw = 1
2571             if not BnotT:
2572                 Ta  = numpy.identity(__m)
2573             vw  = numpy.zeros(__m)
2574             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2575                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2576                 #
2577                 if BnotT:
2578                     E1 = vx1 + _epsilon * EaX
2579                 else:
2580                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2581                 #
2582                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2583                     argsAsSerie = True,
2584                     returnSerieAsArrayMatrix = True )
2585                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2586                 #
2587                 if BnotT:
2588                     EaY = (HE2 - vy2) / _epsilon
2589                 else:
2590                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2591                 #
2592                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2593                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2594                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2595                 #
2596                 vw = vw + Deltaw
2597                 #
2598                 if not BnotT:
2599                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2600                 #
2601                 __j = __j + 1
2602             #
2603             if BnotT:
2604                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2605             #
2606             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2607         #--------------------------
2608         else:
2609             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2610         #
2611         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2612             Xn = CovarianceInflation( Xn,
2613                 selfA._parameters["InflationType"],
2614                 selfA._parameters["InflationFactor"],
2615                 )
2616         #
2617         if Hybrid == "E3DVAR":
2618             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2619             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2620         #
2621         Xa = EnsembleMean( Xn )
2622         #--------------------------
2623         selfA._setInternalState("Xn", Xn)
2624         selfA._setInternalState("seed", numpy.random.get_state())
2625         #--------------------------
2626         #
2627         if selfA._parameters["StoreInternalVariables"] \
2628             or selfA._toStore("CostFunctionJ") \
2629             or selfA._toStore("CostFunctionJb") \
2630             or selfA._toStore("CostFunctionJo") \
2631             or selfA._toStore("APosterioriCovariance") \
2632             or selfA._toStore("InnovationAtCurrentAnalysis") \
2633             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2634             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2635             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2636             _Innovation = Ynpu - _HXa
2637         #
2638         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2639         # ---> avec analysis
2640         selfA.StoredVariables["Analysis"].store( Xa )
2641         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2642             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2643         if selfA._toStore("InnovationAtCurrentAnalysis"):
2644             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2645         # ---> avec current state
2646         if selfA._parameters["StoreInternalVariables"] \
2647             or selfA._toStore("CurrentState"):
2648             selfA.StoredVariables["CurrentState"].store( Xn )
2649         if selfA._toStore("ForecastState"):
2650             selfA.StoredVariables["ForecastState"].store( EMX )
2651         if selfA._toStore("ForecastCovariance"):
2652             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2653         if selfA._toStore("BMA"):
2654             selfA.StoredVariables["BMA"].store( EMX - Xa )
2655         if selfA._toStore("InnovationAtCurrentState"):
2656             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2657         if selfA._toStore("SimulatedObservationAtCurrentState") \
2658             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2659             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2660         # ---> autres
2661         if selfA._parameters["StoreInternalVariables"] \
2662             or selfA._toStore("CostFunctionJ") \
2663             or selfA._toStore("CostFunctionJb") \
2664             or selfA._toStore("CostFunctionJo") \
2665             or selfA._toStore("CurrentOptimum") \
2666             or selfA._toStore("APosterioriCovariance"):
2667             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2668             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2669             J   = Jb + Jo
2670             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2671             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2672             selfA.StoredVariables["CostFunctionJ" ].store( J )
2673             #
2674             if selfA._toStore("IndexOfOptimum") \
2675                 or selfA._toStore("CurrentOptimum") \
2676                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2677                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2678                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2679                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2680                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2681             if selfA._toStore("IndexOfOptimum"):
2682                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2683             if selfA._toStore("CurrentOptimum"):
2684                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2685             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2686                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2687             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2688                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2689             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2690                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2691             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2692                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2693         if selfA._toStore("APosterioriCovariance"):
2694             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2695         if selfA._parameters["EstimationOf"] == "Parameters" \
2696             and J < previousJMinimum:
2697             previousJMinimum    = J
2698             XaMin               = Xa
2699             if selfA._toStore("APosterioriCovariance"):
2700                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2701         # ---> Pour les smoothers
2702         if selfA._toStore("CurrentEnsembleState"):
2703             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2704     #
2705     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2706     # ----------------------------------------------------------------------
2707     if selfA._parameters["EstimationOf"] == "Parameters":
2708         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2709         selfA.StoredVariables["Analysis"].store( XaMin )
2710         if selfA._toStore("APosterioriCovariance"):
2711             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2712         if selfA._toStore("BMA"):
2713             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2714     #
2715     return 0
2716
2717 # ==============================================================================
2718 def mmqr(
2719         func     = None,
2720         x0       = None,
2721         fprime   = None,
2722         bounds   = None,
2723         quantile = 0.5,
2724         maxfun   = 15000,
2725         toler    = 1.e-06,
2726         y        = None,
2727         ):
2728     """
2729     Computer implementation of the MMQR algorithm, based on the publication:
2730     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2731     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2732     """
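    # Illustrative usage sketch (hypothetical, not executed): for a linear model
    # F(a) = X @ a with design matrix X and observations y, median regression
    # coefficients could be estimated as shown below. "X" and "y" are assumptions
    # of this example only, not variables of the present module.
    #
    #   import numpy
    #   X = numpy.array([[1., 0.], [1., 1.], [1., 2.], [1., 3.]])
    #   y = numpy.array([0.1, 1.1, 1.9, 3.2])
    #   coeffs, gap, infos = mmqr(
    #       func     = lambda a: X @ numpy.ravel(a),
    #       x0       = numpy.zeros(2),
    #       fprime   = lambda a: X,
    #       quantile = 0.5,
    #       y        = y,
    #       )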
2733     #
2734     # Retrieval of the initial data and information
2735     # ----------------------------------------------
2736     variables = numpy.ravel( x0 )
2737     mesures   = numpy.ravel( y )
2738     increment = sys.float_info.max
2739     p         = variables.size
2740     n         = mesures.size
2741     quantile  = float(quantile)
2742     #
2743     # Computation of the MM parameters
2744     # --------------------------------
2745     tn      = float(toler) / n
2746     e0      = -tn / math.log(tn)
2747     epsilon = (e0-tn)/(1+math.log(e0))
2748     #
2749     # Calculs d'initialisation
2750     # ------------------------
2751     residus  = mesures - numpy.ravel( func( variables ) )
2752     poids    = 1./(epsilon+numpy.abs(residus))
2753     veps     = 1. - 2. * quantile - residus * poids
2754     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2755     iteration = 0
2756     #
2757     # Iterative search
2758     # ----------------
2759     while (increment > toler) and (iteration < maxfun) :
2760         iteration += 1
2761         #
2762         Derivees  = numpy.array(fprime(variables))
2763         Derivees  = Derivees.reshape(n,p) # ADAO & check shape
2764         DeriveesT = Derivees.transpose()
2765         M         = numpy.dot( DeriveesT , (poids.reshape((n,1)) * Derivees) ) # broadcasting, avoids the deprecated numpy.matrix construction
2766         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2767         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2768         #
2769         variables = variables + step
2770         if bounds is not None:
2771             # Caution: avoid an infinite loop if an interval is too small
2772             while( (variables < numpy.ravel(numpy.asarray(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asarray(bounds)[:,1])).any() ):
2773                 step      = step/2.
2774                 variables = variables - step
2775         residus   = mesures - numpy.ravel( func(variables) )
2776         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2777         #
2778         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2779             step      = step/2.
2780             variables = variables - step
2781             residus   = mesures - numpy.ravel( func(variables) )
2782             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2783         #
2784         increment     = lastsurrogate-surrogate
2785         poids         = 1./(epsilon+numpy.abs(residus))
2786         veps          = 1. - 2. * quantile - residus * poids
2787         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2788     #
2789     # Mesure d'écart
2790     # --------------
2791     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2792     #
2793     return variables, Ecart, [n,p,iteration,increment,0]
2794
2795 # ==============================================================================
2796 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2797     """
2798     Multi-step and multi-method 3DVAR
2799     """
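    # Note (editorial sketch, not executed): this routine only handles the time
    # stepping. At each observation time it propagates the state with M when
    # "EstimationOf" is "State", then delegates the analysis to the "oneCycle"
    # callable (typically one of the 3DVAR variants of this module), with the
    # predicted state used as background.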
2800     #
2801     # Initialisation
2802     # --------------
2803     if selfA._parameters["EstimationOf"] == "State":
2804         M = EM["Direct"].appliedControledFormTo
2805         if CM is not None and "Tangent" in CM and U is not None:
2806             Cm = CM["Tangent"].asMatrix(Xb)
2807         else:
2808             Cm = None
2809         #
2810         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2811             Xn = numpy.ravel(Xb).reshape((-1,1))
2812             selfA.StoredVariables["Analysis"].store( Xn )
2813             if selfA._toStore("APosterioriCovariance"):
2814                 if hasattr(B,"asfullmatrix"):
2815                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2816                 else:
2817                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2818             if selfA._toStore("ForecastState"):
2819                 selfA.StoredVariables["ForecastState"].store( Xn )
2820         elif selfA._parameters["nextStep"]:
2821             Xn = selfA._getInternalState("Xn")
2822     else:
2823         Xn = numpy.ravel(Xb).reshape((-1,1))
2824     #
2825     if hasattr(Y,"stepnumber"):
2826         duration = Y.stepnumber()
2827     else:
2828         duration = 2
2829     #
2830     # Multi-pas
2831     for step in range(duration-1):
2832         if hasattr(Y,"store"):
2833             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2834         else:
2835             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2836         #
2837         if U is not None:
2838             if hasattr(U,"store") and len(U)>1:
2839                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2840             elif hasattr(U,"store") and len(U)==1:
2841                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2842             else:
2843                 Un = numpy.ravel( U ).reshape((-1,1))
2844         else:
2845             Un = None
2846         #
2847         if selfA._parameters["EstimationOf"] == "State": # Forecast
2848             Xn_predicted = M( (Xn, Un) )
2849             if selfA._toStore("ForecastState"):
2850                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2851             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
2852                 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape (state size; __n is not defined in this function)
2853                 Xn_predicted = Xn_predicted + Cm @ Un
2854         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2855             # --- > Par principe, M = Id, Q = 0
2856             Xn_predicted = Xn
2857         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2858         #
2859         oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
2860         #
2861         Xn = selfA.StoredVariables["Analysis"][-1]
2862         #--------------------------
2863         selfA._setInternalState("Xn", Xn)
2864     #
2865     return 0
2866
2867 # ==============================================================================
2868 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2869     """
2870     3DVAR PSAS
2871     """
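    # Note (editorial sketch, not executed): PSAS solves the 3DVAR problem in
    # observation space. With d = Y - H(Xb) the innovation, it minimizes over w
    #     J(w) = 0.5 * w.T @ (H B H.T + R) @ w - w.T @ d
    # and maps the result back to state space as Xa = Xb + B H.T @ w, which is
    # what the CostFunction and the final "Minimum" reconstruction below compute.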
2872     #
2873     # Initialisations
2874     # ---------------
2875     Hm = HO["Direct"].appliedTo
2876     #
2877     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2878         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2879     else:
2880         HXb = Hm( Xb )
2881     HXb = numpy.ravel( HXb ).reshape((-1,1))
2882     if Y.size != HXb.size:
2883         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2884     if max(Y.shape) != max(HXb.shape):
2885         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2886     #
2887     if selfA._toStore("JacobianMatrixAtBackground"):
2888         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2889         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2890         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2891     #
2892     Ht = HO["Tangent"].asMatrix(Xb)
2893     BHT = B * Ht.T
2894     HBHTpR = R + Ht * BHT
2895     Innovation = Y - HXb
2896     #
2897     Xini = numpy.zeros(Y.size) # The control variable w lives in observation space, as required by CostFunction below
2898     #
2899     # Définition de la fonction-coût
2900     # ------------------------------
2901     def CostFunction(w):
2902         _W = w.reshape((-1,1))
2903         if selfA._parameters["StoreInternalVariables"] or \
2904             selfA._toStore("CurrentState") or \
2905             selfA._toStore("CurrentOptimum"):
2906             selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2907         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2908             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2909             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2910         if selfA._toStore("InnovationAtCurrentState"):
2911             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2912         #
2913         Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2914         Jo  = float( - _W.T @ Innovation )
2915         J   = Jb + Jo
2916         #
2917         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2918         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2919         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2920         selfA.StoredVariables["CostFunctionJ" ].store( J )
2921         if selfA._toStore("IndexOfOptimum") or \
2922             selfA._toStore("CurrentOptimum") or \
2923             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2924             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2925             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2926             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2927             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2928         if selfA._toStore("IndexOfOptimum"):
2929             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2930         if selfA._toStore("CurrentOptimum"):
2931             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2932         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2933             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2934         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2935             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2936         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2937             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2938         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2939             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2940         return J
2941     #
2942     def GradientOfCostFunction(w):
2943         _W = w.reshape((-1,1))
2944         GradJb  = HBHTpR @ _W
2945         GradJo  = - Innovation
2946         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2947         return GradJ
2948     #
2949     # Minimisation de la fonctionnelle
2950     # --------------------------------
2951     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2952     #
2953     if selfA._parameters["Minimizer"] == "LBFGSB":
2954         if "0.19" <= scipy.version.version <= "1.1.0":
2955             import lbfgsbhlt as optimiseur
2956         else:
2957             import scipy.optimize as optimiseur
2958         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2959             func        = CostFunction,
2960             x0          = Xini,
2961             fprime      = GradientOfCostFunction,
2962             args        = (),
2963             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2964             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2965             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2966             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2967             iprint      = selfA._parameters["optiprint"],
2968             )
2969         nfeval = Informations['funcalls']
2970         rc     = Informations['warnflag']
2971     elif selfA._parameters["Minimizer"] == "TNC":
2972         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2973             func        = CostFunction,
2974             x0          = Xini,
2975             fprime      = GradientOfCostFunction,
2976             args        = (),
2977             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2978             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2979             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2980             ftol        = selfA._parameters["CostDecrementTolerance"],
2981             messages    = selfA._parameters["optmessages"],
2982             )
2983     elif selfA._parameters["Minimizer"] == "CG":
2984         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2985             f           = CostFunction,
2986             x0          = Xini,
2987             fprime      = GradientOfCostFunction,
2988             args        = (),
2989             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2990             gtol        = selfA._parameters["GradientNormTolerance"],
2991             disp        = selfA._parameters["optdisp"],
2992             full_output = True,
2993             )
2994     elif selfA._parameters["Minimizer"] == "NCG":
2995         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2996             f           = CostFunction,
2997             x0          = Xini,
2998             fprime      = GradientOfCostFunction,
2999             args        = (),
3000             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3001             avextol     = selfA._parameters["CostDecrementTolerance"],
3002             disp        = selfA._parameters["optdisp"],
3003             full_output = True,
3004             )
3005     elif selfA._parameters["Minimizer"] == "BFGS":
3006         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3007             f           = CostFunction,
3008             x0          = Xini,
3009             fprime      = GradientOfCostFunction,
3010             args        = (),
3011             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3012             gtol        = selfA._parameters["GradientNormTolerance"],
3013             disp        = selfA._parameters["optdisp"],
3014             full_output = True,
3015             )
3016     else:
3017         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3018     #
3019     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3020     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3021     #
3022     # Workaround for a TNC bug on the returned Minimum
3023     # ------------------------------------------------
3024     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3025         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3026     else:
3027         Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3028     #
3029     Xa = Minimum
3030     #--------------------------
3031     #
3032     selfA.StoredVariables["Analysis"].store( Xa )
3033     #
3034     if selfA._toStore("OMA") or \
3035         selfA._toStore("SigmaObs2") or \
3036         selfA._toStore("SimulationQuantiles") or \
3037         selfA._toStore("SimulatedObservationAtOptimum"):
3038         if selfA._toStore("SimulatedObservationAtCurrentState"):
3039             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3040         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3041             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3042         else:
3043             HXa = Hm( Xa )
3044     #
3045     if selfA._toStore("APosterioriCovariance") or \
3046         selfA._toStore("SimulationQuantiles") or \
3047         selfA._toStore("JacobianMatrixAtOptimum") or \
3048         selfA._toStore("KalmanGainAtOptimum"):
3049         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3050         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3051     if selfA._toStore("APosterioriCovariance") or \
3052         selfA._toStore("SimulationQuantiles") or \
3053         selfA._toStore("KalmanGainAtOptimum"):
3054         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3055         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3056     if selfA._toStore("APosterioriCovariance") or \
3057         selfA._toStore("SimulationQuantiles"):
3058         BI = B.getI()
3059         RI = R.getI()
3060         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3061     if selfA._toStore("APosterioriCovariance"):
3062         selfA.StoredVariables["APosterioriCovariance"].store( A )
3063     if selfA._toStore("JacobianMatrixAtOptimum"):
3064         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3065     if selfA._toStore("KalmanGainAtOptimum"):
3066         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3067         elif (Y.size >  Xb.size): KG = (B.getI() + numpy.dot(HaM, R.getI() * HtM)).I * HaM * R.getI() # BI and RI are not necessarily defined at this point
3068         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3069     #
3070     # Calculs et/ou stockages supplémentaires
3071     # ---------------------------------------
3072     if selfA._toStore("Innovation") or \
3073         selfA._toStore("SigmaObs2") or \
3074         selfA._toStore("MahalanobisConsistency") or \
3075         selfA._toStore("OMB"):
3076         d  = Y - HXb
3077     if selfA._toStore("Innovation"):
3078         selfA.StoredVariables["Innovation"].store( d )
3079     if selfA._toStore("BMA"):
3080         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3081     if selfA._toStore("OMA"):
3082         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3083     if selfA._toStore("OMB"):
3084         selfA.StoredVariables["OMB"].store( d )
3085     if selfA._toStore("SigmaObs2"):
3086         TraceR = R.trace(Y.size)
3087         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3088     if selfA._toStore("MahalanobisConsistency"):
3089         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3090     if selfA._toStore("SimulationQuantiles"):
3091         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3092     if selfA._toStore("SimulatedObservationAtBackground"):
3093         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3094     if selfA._toStore("SimulatedObservationAtOptimum"):
3095         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3096     #
3097     return 0
3098
3099 # ==============================================================================
3100 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3101     VariantM="KalmanFilterFormula16",
3102     Hybrid=None,
3103     ):
3104     """
3105     Stochastic EnKF
3106     """
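    # Note (editorial sketch, not executed): in the stochastic EnKF each member
    # is updated with its own perturbed observation,
    #     Xa_i = Xf_i + K @ (y_i - H(Xf_i)),   with y_i = Ynpu + eps_i and eps_i ~ N(0, R),
    # the gain K being estimated from the forecast ensemble statistics. The two
    # variants below differ mainly in how K is computed from the anomalies.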
3107     if selfA._parameters["EstimationOf"] == "Parameters":
3108         selfA._parameters["StoreInternalVariables"] = True
3109     #
3110     # Opérateurs
3111     H = HO["Direct"].appliedControledFormTo
3112     #
3113     if selfA._parameters["EstimationOf"] == "State":
3114         M = EM["Direct"].appliedControledFormTo
3115     #
3116     if CM is not None and "Tangent" in CM and U is not None:
3117         Cm = CM["Tangent"].asMatrix(Xb)
3118     else:
3119         Cm = None
3120     #
3121     # Durée d'observation et tailles
3122     if hasattr(Y,"stepnumber"):
3123         duration = Y.stepnumber()
3124         __p = numpy.cumprod(Y.shape())[-1]
3125     else:
3126         duration = 2
3127         __p = numpy.array(Y).size
3128     #
3129     # Précalcul des inversions de B et R
3130     if selfA._parameters["StoreInternalVariables"] \
3131         or selfA._toStore("CostFunctionJ") \
3132         or selfA._toStore("CostFunctionJb") \
3133         or selfA._toStore("CostFunctionJo") \
3134         or selfA._toStore("CurrentOptimum") \
3135         or selfA._toStore("APosterioriCovariance"):
3136         BI = B.getI()
3137         RI = R.getI()
3138     #
3139     __n = Xb.size
3140     __m = selfA._parameters["NumberOfMembers"]
3141     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
3142     previousJMinimum = numpy.finfo(float).max
3143     #
3144     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3145     else:                         Rn = R
3146     #
3147     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3148         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3149         else:                         Pn = B
3150         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3151         selfA.StoredVariables["Analysis"].store( Xb )
3152         if selfA._toStore("APosterioriCovariance"):
3153             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3154         selfA._setInternalState("seed", numpy.random.get_state())
3155     elif selfA._parameters["nextStep"]:
3156         Xn = selfA._getInternalState("Xn")
3157     #
3158     for step in range(duration-1):
3159         numpy.random.set_state(selfA._getInternalState("seed"))
3160         if hasattr(Y,"store"):
3161             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3162         else:
3163             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3164         #
3165         if U is not None:
3166             if hasattr(U,"store") and len(U)>1:
3167                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3168             elif hasattr(U,"store") and len(U)==1:
3169                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3170             else:
3171                 Un = numpy.ravel( U ).reshape((-1,1))
3172         else:
3173             Un = None
3174         #
3175         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3176             Xn = CovarianceInflation( Xn,
3177                 selfA._parameters["InflationType"],
3178                 selfA._parameters["InflationFactor"],
3179                 )
3180         #
3181         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3182             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3183                 argsAsSerie = True,
3184                 returnSerieAsArrayMatrix = True )
3185             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3186             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3187                 argsAsSerie = True,
3188                 returnSerieAsArrayMatrix = True )
3189             if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
3190                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3191                 Xn_predicted = Xn_predicted + Cm @ Un
3192         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3193             # --- > Par principe, M = Id, Q = 0
3194             Xn_predicted = EMX = Xn
3195             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3196                 argsAsSerie = True,
3197                 returnSerieAsArrayMatrix = True )
3198         #
3199         # Mean of forecast and observation of forecast
3200         Xfm  = EnsembleMean( Xn_predicted )
3201         Hfm  = EnsembleMean( HX_predicted )
3202         #
3203         #--------------------------
3204         if VariantM == "KalmanFilterFormula05":
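            # Sample estimate, over the __m members, of the state/observation cross
            # covariance PfHT ~ Pf H.T and of the observation covariance HPfHT ~ H Pf H.T,
            # then classical gain Kn = PfHT (R + HPfHT)^(-1).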
3205             PfHT, HPfHT = 0., 0.
3206             for i in range(__m):
3207                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3208                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3209                 PfHT  += Exfi * Eyfi.T
3210                 HPfHT += Eyfi * Eyfi.T
3211             PfHT  = (1./(__m-1)) * PfHT
3212             HPfHT = (1./(__m-1)) * HPfHT
3213             Kn     = PfHT * ( R + HPfHT ).I
3214             del PfHT, HPfHT
3215             #
3216             for i in range(__m):
3217                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3218                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3219         #--------------------------
3220         elif VariantM == "KalmanFilterFormula16":
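            # Gain estimated directly from the normalized anomalies: EaX for the forecast
            # states, EaY for the simulated observations corrected by the observation
            # perturbations EpY, giving Kn = EaX EaY.T (EaY EaY.T)^(-1).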
3221             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3222             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3223             #
3224             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3225             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3226             #
3227             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3228             #
3229             for i in range(__m):
3230                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
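            # Sketch of this anomaly-based gain formulation (notation only):
            #     K    = EaX EaY^T (EaY EaY^T)^-1, with EaX, EaY the scaled state
            #            and observation anomalies built above
            #     Xa_i = Xf_i + K (EpY_i - H(Xf_i)), EpY_i the perturbed observation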
3231         #--------------------------
3232         else:
3233             raise ValueError("VariantM must be chosen from the authorized methods list.")
3234         #
3235         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3236             Xn = CovarianceInflation( Xn,
3237                 selfA._parameters["InflationType"],
3238                 selfA._parameters["InflationFactor"],
3239                 )
3240         #
3241         if Hybrid == "E3DVAR":
3242             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3243             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3244         #
3245         Xa = EnsembleMean( Xn )
3246         #--------------------------
3247         selfA._setInternalState("Xn", Xn)
3248         selfA._setInternalState("seed", numpy.random.get_state())
3249         #--------------------------
3250         #
3251         if selfA._parameters["StoreInternalVariables"] \
3252             or selfA._toStore("CostFunctionJ") \
3253             or selfA._toStore("CostFunctionJb") \
3254             or selfA._toStore("CostFunctionJo") \
3255             or selfA._toStore("APosterioriCovariance") \
3256             or selfA._toStore("InnovationAtCurrentAnalysis") \
3257             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3258             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3259             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3260             _Innovation = Ynpu - _HXa
3261         #
3262         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3263         # ---> with analysis
3264         selfA.StoredVariables["Analysis"].store( Xa )
3265         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3266             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3267         if selfA._toStore("InnovationAtCurrentAnalysis"):
3268             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3269         # ---> with current state
3270         if selfA._parameters["StoreInternalVariables"] \
3271             or selfA._toStore("CurrentState"):
3272             selfA.StoredVariables["CurrentState"].store( Xn )
3273         if selfA._toStore("ForecastState"):
3274             selfA.StoredVariables["ForecastState"].store( EMX )
3275         if selfA._toStore("ForecastCovariance"):
3276             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3277         if selfA._toStore("BMA"):
3278             selfA.StoredVariables["BMA"].store( EMX - Xa )
3279         if selfA._toStore("InnovationAtCurrentState"):
3280             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3281         if selfA._toStore("SimulatedObservationAtCurrentState") \
3282             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3283             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3284         # ---> others
3285         if selfA._parameters["StoreInternalVariables"] \
3286             or selfA._toStore("CostFunctionJ") \
3287             or selfA._toStore("CostFunctionJb") \
3288             or selfA._toStore("CostFunctionJo") \
3289             or selfA._toStore("CurrentOptimum") \
3290             or selfA._toStore("APosterioriCovariance"):
3291             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3292             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3293             J   = Jb + Jo
3294             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3295             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3296             selfA.StoredVariables["CostFunctionJ" ].store( J )
3297             #
3298             if selfA._toStore("IndexOfOptimum") \
3299                 or selfA._toStore("CurrentOptimum") \
3300                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3301                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3302                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3303                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3304                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3305             if selfA._toStore("IndexOfOptimum"):
3306                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3307             if selfA._toStore("CurrentOptimum"):
3308                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3309             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3310                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3311             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3312                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3313             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3314                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3315             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3316                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3317         if selfA._toStore("APosterioriCovariance"):
3318             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3319         if selfA._parameters["EstimationOf"] == "Parameters" \
3320             and J < previousJMinimum:
3321             previousJMinimum    = J
3322             XaMin               = Xa
3323             if selfA._toStore("APosterioriCovariance"):
3324                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3325         # ---> For the smoothers
3326         if selfA._toStore("CurrentEnsembleState"):
3327             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3328     #
3329     # Additional final storage of the optimum in parameter estimation
3330     # ----------------------------------------------------------------------
3331     if selfA._parameters["EstimationOf"] == "Parameters":
3332         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3333         selfA.StoredVariables["Analysis"].store( XaMin )
3334         if selfA._toStore("APosterioriCovariance"):
3335             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3336         if selfA._toStore("BMA"):
3337             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3338     #
3339     return 0
3340
3341 # ==============================================================================
3342 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3343     """
3344     3DVAR
3345     """
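    # Reminder of the functional minimized below (a sketch, in the notation of
    # this routine, with B and R the background and observation error covariances):
    #     J(x)  = 0.5*(x - Xb)^T B^-1 (x - Xb) + 0.5*(Y - H(x))^T R^-1 (Y - H(x))
    #     dJ/dx = B^-1 (x - Xb) - H'^T R^-1 (Y - H(x))
    # where H' is the tangent of H and its adjoint H'^T is applied through Ha.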
3346     #
3347     # Initializations
3348     # ---------------
3349     Hm = HO["Direct"].appliedTo
3350     Ha = HO["Adjoint"].appliedInXTo
3351     #
3352     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3353         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3354     else:
3355         HXb = Hm( Xb )
3356     HXb = HXb.reshape((-1,1))
3357     if Y.size != HXb.size:
3358         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
3359     if max(Y.shape) != max(HXb.shape):
3360         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
3361     #
3362     if selfA._toStore("JacobianMatrixAtBackground"):
3363         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3364         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3365         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3366     #
3367     BI = B.getI()
3368     RI = R.getI()
3369     #
3370     Xini = selfA._parameters["InitializationPoint"]
3371     #
3372     # Definition of the cost function
3373     # ------------------------------
3374     def CostFunction(x):
3375         _X  = numpy.ravel( x ).reshape((-1,1))
3376         if selfA._parameters["StoreInternalVariables"] or \
3377             selfA._toStore("CurrentState") or \
3378             selfA._toStore("CurrentOptimum"):
3379             selfA.StoredVariables["CurrentState"].store( _X )
3380         _HX = Hm( _X ).reshape((-1,1))
3381         _Innovation = Y - _HX
3382         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3383             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3384             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3385         if selfA._toStore("InnovationAtCurrentState"):
3386             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3387         #
3388         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3389         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3390         J   = Jb + Jo
3391         #
3392         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3393         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3394         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3395         selfA.StoredVariables["CostFunctionJ" ].store( J )
3396         if selfA._toStore("IndexOfOptimum") or \
3397             selfA._toStore("CurrentOptimum") or \
3398             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3399             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3400             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3401             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3402             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3403         if selfA._toStore("IndexOfOptimum"):
3404             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3405         if selfA._toStore("CurrentOptimum"):
3406             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3407         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3408             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3409         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3410             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3411         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3412             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3413         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3414             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3415         return J
3416     #
3417     def GradientOfCostFunction(x):
3418         _X      = x.reshape((-1,1))
3419         _HX     = Hm( _X ).reshape((-1,1))
3420         GradJb  = BI * (_X - Xb)
3421         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3422         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3423         return GradJ
3424     #
3425     # Minimization of the functional
3426     # --------------------------------
3427     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3428     #
3429     if selfA._parameters["Minimizer"] == "LBFGSB":
3430         if "0.19" <= scipy.version.version <= "1.1.0":
3431             import lbfgsbhlt as optimiseur
3432         else:
3433             import scipy.optimize as optimiseur
3434         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3435             func        = CostFunction,
3436             x0          = Xini,
3437             fprime      = GradientOfCostFunction,
3438             args        = (),
3439             bounds      = selfA._parameters["Bounds"],
3440             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3441             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3442             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3443             iprint      = selfA._parameters["optiprint"],
3444             )
3445         nfeval = Informations['funcalls']
3446         rc     = Informations['warnflag']
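        # Note on the settings above: fmin_l_bfgs_b stops when
        # (f_k - f_{k+1}) / max(|f_k|, |f_{k+1}|, 1) <= factr * machine_eps,
        # so CostDecrementTolerance is rescaled by 1.e14 before being passed as
        # factr, and pgtol bounds the largest projected-gradient component.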
3447     elif selfA._parameters["Minimizer"] == "TNC":
3448         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3449             func        = CostFunction,
3450             x0          = Xini,
3451             fprime      = GradientOfCostFunction,
3452             args        = (),
3453             bounds      = selfA._parameters["Bounds"],
3454             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3455             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3456             ftol        = selfA._parameters["CostDecrementTolerance"],
3457             messages    = selfA._parameters["optmessages"],
3458             )
3459     elif selfA._parameters["Minimizer"] == "CG":
3460         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3461             f           = CostFunction,
3462             x0          = Xini,
3463             fprime      = GradientOfCostFunction,
3464             args        = (),
3465             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3466             gtol        = selfA._parameters["GradientNormTolerance"],
3467             disp        = selfA._parameters["optdisp"],
3468             full_output = True,
3469             )
3470     elif selfA._parameters["Minimizer"] == "NCG":
3471         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3472             f           = CostFunction,
3473             x0          = Xini,
3474             fprime      = GradientOfCostFunction,
3475             args        = (),
3476             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3477             avextol     = selfA._parameters["CostDecrementTolerance"],
3478             disp        = selfA._parameters["optdisp"],
3479             full_output = True,
3480             )
3481     elif selfA._parameters["Minimizer"] == "BFGS":
3482         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3483             f           = CostFunction,
3484             x0          = Xini,
3485             fprime      = GradientOfCostFunction,
3486             args        = (),
3487             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3488             gtol        = selfA._parameters["GradientNormTolerance"],
3489             disp        = selfA._parameters["optdisp"],
3490             full_output = True,
3491             )
3492     else:
3493         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3494     #
3495     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3496     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3497     #
3498     # Workaround for a TNC bug affecting the returned Minimum
3499     # ----------------------------------------------------------------
3500     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3501         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3502     #
3503     Xa = Minimum
3504     #--------------------------
3505     #
3506     selfA.StoredVariables["Analysis"].store( Xa )
3507     #
3508     if selfA._toStore("OMA") or \
3509         selfA._toStore("SigmaObs2") or \
3510         selfA._toStore("SimulationQuantiles") or \
3511         selfA._toStore("SimulatedObservationAtOptimum"):
3512         if selfA._toStore("SimulatedObservationAtCurrentState"):
3513             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3514         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3515             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3516         else:
3517             HXa = Hm( Xa )
3518     #
3519     if selfA._toStore("APosterioriCovariance") or \
3520         selfA._toStore("SimulationQuantiles") or \
3521         selfA._toStore("JacobianMatrixAtOptimum") or \
3522         selfA._toStore("KalmanGainAtOptimum"):
3523         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3524         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3525     if selfA._toStore("APosterioriCovariance") or \
3526         selfA._toStore("SimulationQuantiles") or \
3527         selfA._toStore("KalmanGainAtOptimum"):
3528         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3529         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3530     if selfA._toStore("APosterioriCovariance") or \
3531         selfA._toStore("SimulationQuantiles"):
3532         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
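        # Presumably, HessienneEstimation (defined elsewhere in this module)
        # returns the inverse Hessian of J at the optimum, i.e. the analysis
        # error covariance A = (B^-1 + H'^T R^-1 H')^-1 in the linearized case.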
3533     if selfA._toStore("APosterioriCovariance"):
3534         selfA.StoredVariables["APosterioriCovariance"].store( A )
3535     if selfA._toStore("JacobianMatrixAtOptimum"):
3536         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3537     if selfA._toStore("KalmanGainAtOptimum"):
3538         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3539         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3540         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
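        # The two expressions above are the observation-space and state-space
        # forms of the same gain, K = B H'^T (H' B H'^T + R)^-1
        #                           = (B^-1 + H'^T R^-1 H')^-1 H'^T R^-1,
        # equivalent through the Sherman-Morrison-Woodbury identity; the cheaper
        # one is selected from the relative sizes of Y and Xb.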
3541     #
3542     # Additional computations and/or storage
3543     # ---------------------------------------
3544     if selfA._toStore("Innovation") or \
3545         selfA._toStore("SigmaObs2") or \
3546         selfA._toStore("MahalanobisConsistency") or \
3547         selfA._toStore("OMB"):
3548         d  = Y - HXb
3549     if selfA._toStore("Innovation"):
3550         selfA.StoredVariables["Innovation"].store( d )
3551     if selfA._toStore("BMA"):
3552         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3553     if selfA._toStore("OMA"):
3554         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3555     if selfA._toStore("OMB"):
3556         selfA.StoredVariables["OMB"].store( d )
3557     if selfA._toStore("SigmaObs2"):
3558         TraceR = R.trace(Y.size)
3559         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3560     if selfA._toStore("MahalanobisConsistency"):
3561         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3562     if selfA._toStore("SimulationQuantiles"):
3563         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3564     if selfA._toStore("SimulatedObservationAtBackground"):
3565         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3566     if selfA._toStore("SimulatedObservationAtOptimum"):
3567         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3568     #
3569     return 0
3570
3571 # ==============================================================================
3572 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3573     """
3574     4DVAR
3575     """
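    # Reminder of the functional minimized below over the assimilation window
    # (a sketch; M denotes the evolution model and H the observation operator):
    #     J(x_0) = 0.5*(x_0 - Xb)^T B^-1 (x_0 - Xb)
    #              + 0.5 * sum_k (y_k - H(x_k))^T R^-1 (y_k - H(x_k)),
    #     with x_k = M(x_{k-1}, u_k) (+ Cm u_k) for the observed steps k >= 1.
    # The gradient is obtained by the adjoint backward recursion coded in
    # GradientOfCostFunction below.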
3576     #
3577     # Initializations
3578     # ---------------
3579     #
3580     # Operators
3581     Hm = HO["Direct"].appliedControledFormTo
3582     Mm = EM["Direct"].appliedControledFormTo
3583     #
3584     if CM is not None and "Tangent" in CM and U is not None:
3585         Cm = CM["Tangent"].asMatrix(Xb)
3586     else:
3587         Cm = None
3588     #
3589     def Un(_step):
3590         if U is not None:
3591             if hasattr(U,"store") and 1<=_step<len(U) :
3592                 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3593             elif hasattr(U,"store") and len(U)==1:
3594                 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3595             else:
3596                 _Un = numpy.ravel( U ).reshape((-1,1))
3597         else:
3598             _Un = None
3599         return _Un
3600     def CmUn(_xn,_un):
3601         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
3602             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3603             _CmUn = (_Cm @ _un).reshape((-1,1))
3604         else:
3605             _CmUn = 0.
3606         return _CmUn
3607     #
3608     # Remark: the observations are used starting from time step number 1,
3609     # and are indexed accordingly in Yo. Step 0 is therefore not used,
3610     # since the first assimilation step starts with the observation of
3611     # step 1.
3612     #
3613     # Number of time steps identical to the number of observation steps
3614     if hasattr(Y,"stepnumber"):
3615         duration = Y.stepnumber()
3616     else:
3617         duration = 2
3618     #
3619     # Precompute the inverses of B and R
3620     BI = B.getI()
3621     RI = R.getI()
3622     #
3623     # Starting point of the optimization
3624     Xini = selfA._parameters["InitializationPoint"]
3625     #
3626     # Definition of the cost function
3627     # ------------------------------
3628     selfA.DirectCalculation = [None,] # Step 0 is not observed
3629     selfA.DirectInnovation  = [None,] # Step 0 is not observed
3630     def CostFunction(x):
3631         _X  = numpy.ravel( x ).reshape((-1,1))
3632         if selfA._parameters["StoreInternalVariables"] or \
3633             selfA._toStore("CurrentState") or \
3634             selfA._toStore("CurrentOptimum"):
3635             selfA.StoredVariables["CurrentState"].store( _X )
3636         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3637         selfA.DirectCalculation = [None,]
3638         selfA.DirectInnovation  = [None,]
3639         Jo  = 0.
3640         _Xn = _X
3641         for step in range(0,duration-1):
3642             if hasattr(Y,"store"):
3643                 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3644             else:
3645                 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3646             _Un = Un(step)
3647             #
3648             # Evolution step
3649             if selfA._parameters["EstimationOf"] == "State":
3650                 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3651             elif selfA._parameters["EstimationOf"] == "Parameters":
3652                 pass
3653             #
3654             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3655                 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3656             #
3657             # Difference with the observations
3658             if selfA._parameters["EstimationOf"] == "State":
3659                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3660             elif selfA._parameters["EstimationOf"] == "Parameters":
3661                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3662             #
3663             # Storage of the state
3664             selfA.DirectCalculation.append( _Xn )
3665             selfA.DirectInnovation.append( _YmHMX )
3666             #
3667             # Contribution to the observation part of the functional
3668             Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3669         J = Jb + Jo
3670         #
3671         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3672         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3673         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3674         selfA.StoredVariables["CostFunctionJ" ].store( J )
3675         if selfA._toStore("IndexOfOptimum") or \
3676             selfA._toStore("CurrentOptimum") or \
3677             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3678             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3679             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3680             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3681         if selfA._toStore("IndexOfOptimum"):
3682             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3683         if selfA._toStore("CurrentOptimum"):
3684             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3685         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3686             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3687         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3688             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3689         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3690             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3691         return J
3692     #
3693     def GradientOfCostFunction(x):
3694         _X      = numpy.ravel( x ).reshape((-1,1))
3695         GradJb  = BI * (_X - Xb)
3696         GradJo  = 0.
3697         for step in range(duration-1,0,-1):
3698             # Retrieve the last stored evolution state
3699             _Xn = selfA.DirectCalculation.pop()
3700             # Retrieve the last stored innovation
3701             _YmHMX = selfA.DirectInnovation.pop()
3702             # Computation of the adjoints
3703             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3704             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3705             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3706             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3707             # Gradient computation by adjoint state
3708             GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
3709             GradJo = Ma * GradJo                 # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
3710         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3711         return GradJ
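    # Sketch of the recursion above, with Ma, Ha the adjoint matrices at each
    # step and d_k the stored innovation:
    #     g = 0 ; for k = K..1 : g = Ma_k ( g + Ha_k R^-1 d_k )
    #     grad J(x_0) = B^-1 (x_0 - Xb) - g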
3712     #
3713     # Minimization of the functional
3714     # --------------------------------
3715     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3716     #
3717     if selfA._parameters["Minimizer"] == "LBFGSB":
3718         if "0.19" <= scipy.version.version <= "1.1.0":
3719             import lbfgsbhlt as optimiseur
3720         else:
3721             import scipy.optimize as optimiseur
3722         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3723             func        = CostFunction,
3724             x0          = Xini,
3725             fprime      = GradientOfCostFunction,
3726             args        = (),
3727             bounds      = selfA._parameters["Bounds"],
3728             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3729             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3730             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3731             iprint      = selfA._parameters["optiprint"],
3732             )
3733         nfeval = Informations['funcalls']
3734         rc     = Informations['warnflag']
3735     elif selfA._parameters["Minimizer"] == "TNC":
3736         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3737             func        = CostFunction,
3738             x0          = Xini,
3739             fprime      = GradientOfCostFunction,
3740             args        = (),
3741             bounds      = selfA._parameters["Bounds"],
3742             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3743             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3744             ftol        = selfA._parameters["CostDecrementTolerance"],
3745             messages    = selfA._parameters["optmessages"],
3746             )
3747     elif selfA._parameters["Minimizer"] == "CG":
3748         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3749             f           = CostFunction,
3750             x0          = Xini,
3751             fprime      = GradientOfCostFunction,
3752             args        = (),
3753             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3754             gtol        = selfA._parameters["GradientNormTolerance"],
3755             disp        = selfA._parameters["optdisp"],
3756             full_output = True,
3757             )
3758     elif selfA._parameters["Minimizer"] == "NCG":
3759         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3760             f           = CostFunction,
3761             x0          = Xini,
3762             fprime      = GradientOfCostFunction,
3763             args        = (),
3764             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3765             avextol     = selfA._parameters["CostDecrementTolerance"],
3766             disp        = selfA._parameters["optdisp"],
3767             full_output = True,
3768             )
3769     elif selfA._parameters["Minimizer"] == "BFGS":
3770         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3771             f           = CostFunction,
3772             x0          = Xini,
3773             fprime      = GradientOfCostFunction,
3774             args        = (),
3775             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3776             gtol        = selfA._parameters["GradientNormTolerance"],
3777             disp        = selfA._parameters["optdisp"],
3778             full_output = True,
3779             )
3780     else:
3781         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3782     #
3783     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3784     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3785     #
3786     # Workaround for a TNC bug affecting the returned Minimum
3787     # ----------------------------------------------------------------
3788     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3789         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3790     #
3791     # Obtain the analysis
3792     # ----------------------
3793     Xa = Minimum
3794     #
3795     selfA.StoredVariables["Analysis"].store( Xa )
3796     #
3797     # Additional computations and/or storage
3798     # ---------------------------------------
3799     if selfA._toStore("BMA"):
3800         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3801     #
3802     return 0
3803
3804 # ==============================================================================
3805 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3806     """
3807     Standard Kalman Filter
3808     """
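    # Reminder of the linear Kalman recursion implemented below, with Mt, Ht the
    # (here constant) tangent evolution and observation matrices and Ma, Ha
    # their adjoints (a sketch in the notation of this routine):
    #     Xf = Mt Xn (+ Cm Un)            Pf = Mt Pn Ma + Q
    #     K  = Pf Ha (Ht Pf Ha + R)^-1
    #     Xa = Xf + K (y - Ht Xf)         Pa = Pf - K Ht Pf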
3809     if selfA._parameters["EstimationOf"] == "Parameters":
3810         selfA._parameters["StoreInternalVariables"] = True
3811     #
3812     # Operators
3813     # ----------
3814     Ht = HO["Tangent"].asMatrix(Xb)
3815     Ha = HO["Adjoint"].asMatrix(Xb)
3816     #
3817     if selfA._parameters["EstimationOf"] == "State":
3818         Mt = EM["Tangent"].asMatrix(Xb)
3819         Ma = EM["Adjoint"].asMatrix(Xb)
3820     #
3821     if CM is not None and "Tangent" in CM and U is not None:
3822         Cm = CM["Tangent"].asMatrix(Xb)
3823     else:
3824         Cm = None
3825     #
3826     # Observation duration and sizes
3827     if hasattr(Y,"stepnumber"):
3828         duration = Y.stepnumber()
3829         __p = numpy.cumprod(Y.shape())[-1]
3830     else:
3831         duration = 2
3832         __p = numpy.array(Y).size
3833     #
3834     # Precompute the inverses of B and R
3835     if selfA._parameters["StoreInternalVariables"] \
3836         or selfA._toStore("CostFunctionJ") \
3837         or selfA._toStore("CostFunctionJb") \
3838         or selfA._toStore("CostFunctionJo") \
3839         or selfA._toStore("CurrentOptimum") \
3840         or selfA._toStore("APosterioriCovariance"):
3841         BI = B.getI()
3842         RI = R.getI()
3843     #
3844     __n = Xb.size
3845     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
3846     #
3847     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3848         Xn = Xb
3849         Pn = B
3850         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3851         selfA.StoredVariables["Analysis"].store( Xb )
3852         if selfA._toStore("APosterioriCovariance"):
3853             if hasattr(B,"asfullmatrix"):
3854                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3855             else:
3856                 selfA.StoredVariables["APosterioriCovariance"].store( B )
3857         selfA._setInternalState("seed", numpy.random.get_state())
3858     elif selfA._parameters["nextStep"]:
3859         Xn = selfA._getInternalState("Xn")
3860         Pn = selfA._getInternalState("Pn")
3861     #
3862     if selfA._parameters["EstimationOf"] == "Parameters":
3863         XaMin            = Xn
3864         previousJMinimum = numpy.finfo(float).max
3865     #
3866     for step in range(duration-1):
3867         if hasattr(Y,"store"):
3868             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3869         else:
3870             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3871         #
3872         if U is not None:
3873             if hasattr(U,"store") and len(U)>1:
3874                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3875             elif hasattr(U,"store") and len(U)==1:
3876                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3877             else:
3878                 Un = numpy.ravel( U ).reshape((-1,1))
3879         else:
3880             Un = None
3881         #
3882         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3883             Xn_predicted = Mt @ Xn
3884             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
3885                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3886                 Xn_predicted = Xn_predicted + Cm @ Un
3887             Pn_predicted = Q + Mt * (Pn * Ma)
3888         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3889             # --- > By principle, M = Id, Q = 0
3890             Xn_predicted = Xn
3891             Pn_predicted = Pn
3892         #
3893         if selfA._parameters["EstimationOf"] == "State":
3894             HX_predicted = Ht @ Xn_predicted
3895             _Innovation  = Ynpu - HX_predicted
3896         elif selfA._parameters["EstimationOf"] == "Parameters":
3897             HX_predicted = Ht @ Xn_predicted
3898             _Innovation  = Ynpu - HX_predicted
3899             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
3900                 _Innovation = _Innovation - Cm @ Un
3901         #
3902         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3903         Xn = Xn_predicted + Kn * _Innovation
3904         Pn = Pn_predicted - Kn * Ht * Pn_predicted
3905         #
3906         Xa = Xn # Pointers
3907         #--------------------------
3908         selfA._setInternalState("Xn", Xn)
3909         selfA._setInternalState("Pn", Pn)
3910         #--------------------------
3911         #
3912         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3913         # ---> with analysis
3914         selfA.StoredVariables["Analysis"].store( Xa )
3915         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3916             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3917         if selfA._toStore("InnovationAtCurrentAnalysis"):
3918             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3919         # ---> with current state
3920         if selfA._parameters["StoreInternalVariables"] \
3921             or selfA._toStore("CurrentState"):
3922             selfA.StoredVariables["CurrentState"].store( Xn )
3923         if selfA._toStore("ForecastState"):
3924             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3925         if selfA._toStore("ForecastCovariance"):
3926             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3927         if selfA._toStore("BMA"):
3928             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3929         if selfA._toStore("InnovationAtCurrentState"):
3930             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3931         if selfA._toStore("SimulatedObservationAtCurrentState") \
3932             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3933             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3934         # ---> others
3935         if selfA._parameters["StoreInternalVariables"] \
3936             or selfA._toStore("CostFunctionJ") \
3937             or selfA._toStore("CostFunctionJb") \
3938             or selfA._toStore("CostFunctionJo") \
3939             or selfA._toStore("CurrentOptimum") \
3940             or selfA._toStore("APosterioriCovariance"):
3941             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3942             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3943             J   = Jb + Jo
3944             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3945             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3946             selfA.StoredVariables["CostFunctionJ" ].store( J )
3947             #
3948             if selfA._toStore("IndexOfOptimum") \
3949                 or selfA._toStore("CurrentOptimum") \
3950                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3951                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3952                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3953                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3954                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3955             if selfA._toStore("IndexOfOptimum"):
3956                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3957             if selfA._toStore("CurrentOptimum"):
3958                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3959             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3960                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3961             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3962                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3963             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3964                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3965             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3966                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3967         if selfA._toStore("APosterioriCovariance"):
3968             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3969         if selfA._parameters["EstimationOf"] == "Parameters" \
3970             and J < previousJMinimum:
3971             previousJMinimum    = J
3972             XaMin               = Xa
3973             if selfA._toStore("APosterioriCovariance"):
3974                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3975     #
3976     # Additional final storage of the optimum in parameter estimation
3977     # ----------------------------------------------------------------------
3978     if selfA._parameters["EstimationOf"] == "Parameters":
3979         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3980         selfA.StoredVariables["Analysis"].store( XaMin )
3981         if selfA._toStore("APosterioriCovariance"):
3982             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3983         if selfA._toStore("BMA"):
3984             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3985     #
3986     return 0
3987
3988 # ==============================================================================
3989 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3990     """
3991     Unscented Kalman Filter
3992     """
3993     if selfA._parameters["EstimationOf"] == "Parameters":
3994         selfA._parameters["StoreInternalVariables"] = True
3995     #
3996     L     = Xb.size
3997     Alpha = selfA._parameters["Alpha"]
3998     Beta  = selfA._parameters["Beta"]
3999     if selfA._parameters["Kappa"] == 0:
4000         if selfA._parameters["EstimationOf"] == "State":
4001             Kappa = 0
4002         elif selfA._parameters["EstimationOf"] == "Parameters":
4003             Kappa = 3 - L
4004     else:
4005         Kappa = selfA._parameters["Kappa"]
4006     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4007     Gamma  = math.sqrt( L + Lambda )
4008     #
4009     Ww = []
4010     Ww.append( 0. )
4011     for i in range(2*L):
4012         Ww.append( 1. / (2.*(L + Lambda)) )
4013     #
4014     Wm = numpy.array( Ww )
4015     Wm[0] = Lambda / (L + Lambda)
4016     Wc = numpy.array( Ww )
4017     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
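    # Unscented transform setup: the 2L+1 sigma points built below in the loop
    # are X, X +/- Gamma*sqrt(P), with Gamma = sqrt(L + Lambda) and
    # Lambda = Alpha^2 (L + Kappa) - L, and the standard UKF weights are
    # Wm[0] = Lambda/(L+Lambda), Wc[0] = Wm[0] + (1 - Alpha^2 + Beta),
    # Wm[i] = Wc[i] = 1/(2(L+Lambda)) for i >= 1.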
4018     #
4019     # Operators
4020     Hm = HO["Direct"].appliedControledFormTo
4021     #
4022     if selfA._parameters["EstimationOf"] == "State":
4023         Mm = EM["Direct"].appliedControledFormTo
4024     #
4025     if CM is not None and "Tangent" in CM and U is not None:
4026         Cm = CM["Tangent"].asMatrix(Xb)
4027     else:
4028         Cm = None
4029     #
4030     # Observation duration and sizes
4031     if hasattr(Y,"stepnumber"):
4032         duration = Y.stepnumber()
4033         __p = numpy.cumprod(Y.shape())[-1]
4034     else:
4035         duration = 2
4036         __p = numpy.array(Y).size
4037     #
4038     # Precompute the inverses of B and R
4039     if selfA._parameters["StoreInternalVariables"] \
4040         or selfA._toStore("CostFunctionJ") \
4041         or selfA._toStore("CostFunctionJb") \
4042         or selfA._toStore("CostFunctionJo") \
4043         or selfA._toStore("CurrentOptimum") \
4044         or selfA._toStore("APosterioriCovariance"):
4045         BI = B.getI()
4046         RI = R.getI()
4047     #
4048     __n = Xb.size
4049     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
4050     #
4051     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4052         Xn = Xb
4053         if hasattr(B,"asfullmatrix"):
4054             Pn = B.asfullmatrix(__n)
4055         else:
4056             Pn = B
4057         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4058         selfA.StoredVariables["Analysis"].store( Xb )
4059         if selfA._toStore("APosterioriCovariance"):
4060             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4061     elif selfA._parameters["nextStep"]:
4062         Xn = selfA._getInternalState("Xn")
4063         Pn = selfA._getInternalState("Pn")
4064     #
4065     if selfA._parameters["EstimationOf"] == "Parameters":
4066         XaMin            = Xn
4067         previousJMinimum = numpy.finfo(float).max
4068     #
4069     for step in range(duration-1):
4070         if hasattr(Y,"store"):
4071             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4072         else:
4073             Ynpu = numpy.ravel( Y ).reshape((__p,1))
4074         #
4075         if U is not None:
4076             if hasattr(U,"store") and len(U)>1:
4077                 Un = numpy.ravel( U[step] ).reshape((-1,1))
4078             elif hasattr(U,"store") and len(U)==1:
4079                 Un = numpy.ravel( U[0] ).reshape((-1,1))
4080             else:
4081                 Un = numpy.ravel( U ).reshape((-1,1))
4082         else:
4083             Un = None
4084         #
4085         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4086         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4087         nbSpts = 2*Xn.size+1
4088         #
4089         XEtnnp = []
4090         for point in range(nbSpts):
4091             if selfA._parameters["EstimationOf"] == "State":
4092                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4093                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
4094                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4095                     XEtnnpi = XEtnnpi + Cm @ Un
4096             elif selfA._parameters["EstimationOf"] == "Parameters":
4097                 # --- > By principle, M = Id, Q = 0
4098                 XEtnnpi = Xnp[:,point]
4099             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4100         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4101         #
4102         Xncm = ( XEtnnp * Wm ).sum(axis=1)
4103         #
4104         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
4105         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4106         for point in range(nbSpts):
4107             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4108         #
4109         Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4110         #
4111         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4112         #
4113         Ynnp = []
4114         for point in range(nbSpts):
4115             if selfA._parameters["EstimationOf"] == "State":
4116                 Ynnpi = Hm( (Xnnp[:,point], None) )
4117             elif selfA._parameters["EstimationOf"] == "Parameters":
4118                 Ynnpi = Hm( (Xnnp[:,point], Un) )
4119             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4120         Ynnp = numpy.concatenate( Ynnp, axis=1 )
4121         #
4122         Yncm = ( Ynnp * Wm ).sum(axis=1)
4123         #
4124         Pyyn = R
4125         Pxyn = 0.
4126         for point in range(nbSpts):
4127             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4128             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4129         #
4130         _Innovation  = Ynpu - Yncm.reshape((-1,1))
4131         if selfA._parameters["EstimationOf"] == "Parameters":
4132             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
4133                 _Innovation = _Innovation - Cm @ Un
4134         #
4135         Kn = Pxyn * Pyyn.I
4136         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4137         Pn = Pnm - Kn * Pyyn * Kn.T
4138         #
4139         Xa = Xn # Pointers
4140         #--------------------------
4141         selfA._setInternalState("Xn", Xn)
4142         selfA._setInternalState("Pn", Pn)
4143         #--------------------------
4144         #
4145         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4146         # ---> with analysis
4147         selfA.StoredVariables["Analysis"].store( Xa )
4148         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4149             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4150         if selfA._toStore("InnovationAtCurrentAnalysis"):
4151             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4152         # ---> with current state
4153         if selfA._parameters["StoreInternalVariables"] \
4154             or selfA._toStore("CurrentState"):
4155             selfA.StoredVariables["CurrentState"].store( Xn )
4156         if selfA._toStore("ForecastState"):
4157             selfA.StoredVariables["ForecastState"].store( Xncm )
4158         if selfA._toStore("ForecastCovariance"):
4159             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4160         if selfA._toStore("BMA"):
4161             selfA.StoredVariables["BMA"].store( Xncm.reshape((-1,1)) - Xa )
4162         if selfA._toStore("InnovationAtCurrentState"):
4163             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4164         if selfA._toStore("SimulatedObservationAtCurrentState") \
4165             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4166             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4167         # ---> others
4168         if selfA._parameters["StoreInternalVariables"] \
4169             or selfA._toStore("CostFunctionJ") \
4170             or selfA._toStore("CostFunctionJb") \
4171             or selfA._toStore("CostFunctionJo") \
4172             or selfA._toStore("CurrentOptimum") \
4173             or selfA._toStore("APosterioriCovariance"):
4174             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4175             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4176             J   = Jb + Jo
4177             selfA.StoredVariables["CostFunctionJb"].store( Jb )
4178             selfA.StoredVariables["CostFunctionJo"].store( Jo )
4179             selfA.StoredVariables["CostFunctionJ" ].store( J )
4180             #
4181             if selfA._toStore("IndexOfOptimum") \
4182                 or selfA._toStore("CurrentOptimum") \
4183                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4184                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4185                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4186                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4187                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4188             if selfA._toStore("IndexOfOptimum"):
4189                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4190             if selfA._toStore("CurrentOptimum"):
4191                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4192             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4193                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4194             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4195                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4196             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4197                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4198             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4199                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4200         if selfA._toStore("APosterioriCovariance"):
4201             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4202         if selfA._parameters["EstimationOf"] == "Parameters" \
4203             and J < previousJMinimum:
4204             previousJMinimum    = J
4205             XaMin               = Xa
4206             if selfA._toStore("APosterioriCovariance"):
4207                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4208     #
4209     # Additional final storage of the optimum in parameter estimation
4210     # ----------------------------------------------------------------------
4211     if selfA._parameters["EstimationOf"] == "Parameters":
4212         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4213         selfA.StoredVariables["Analysis"].store( XaMin )
4214         if selfA._toStore("APosterioriCovariance"):
4215             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4216         if selfA._toStore("BMA"):
4217             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4218     #
4219     return 0
4220
4221 # ==============================================================================
4222 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4223     """
4224     3DVAR variational analysis with no inversion of B
4225     """
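    # This formulation minimizes over the increment v, with x = Xb + B v, so
    # that Jb = 0.5 * v^T B v replaces 0.5*(x - Xb)^T B^-1 (x - Xb) (B being
    # symmetric) and no inversion of B is required: only B and its transpose
    # are applied.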
4226     #
4227     # Initializations
4228     # ---------------
4229     Hm = HO["Direct"].appliedTo
4230     Ha = HO["Adjoint"].appliedInXTo
4231     #
4232     BT = B.getT()
4233     RI = R.getI()
4234     #
4235     Xini = numpy.zeros(Xb.shape)
4236     #
4237     # Definition of the cost function
4238     # ------------------------------
4239     def CostFunction(v):
4240         _V = numpy.ravel( v ).reshape((-1,1))
4241         _X = Xb + B * _V
4242         if selfA._parameters["StoreInternalVariables"] or \
4243             selfA._toStore("CurrentState") or \
4244             selfA._toStore("CurrentOptimum"):
4245             selfA.StoredVariables["CurrentState"].store( _X )
4246         _HX = Hm( _X )
4247         _HX = numpy.ravel( _HX ).reshape((-1,1))
4248         _Innovation = Y - _HX
4249         if selfA._toStore("SimulatedObservationAtCurrentState") or \
4250             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4251             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4252         if selfA._toStore("InnovationAtCurrentState"):
4253             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4254         #
4255         Jb  = float( 0.5 * _V.T * (BT * _V) )
4256         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4257         J   = Jb + Jo
4258         #
4259         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4260         selfA.StoredVariables["CostFunctionJb"].store( Jb )
4261         selfA.StoredVariables["CostFunctionJo"].store( Jo )
4262         selfA.StoredVariables["CostFunctionJ" ].store( J )
4263         if selfA._toStore("IndexOfOptimum") or \
4264             selfA._toStore("CurrentOptimum") or \
4265             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4266             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4267             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4268             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4269             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4270         if selfA._toStore("IndexOfOptimum"):
4271             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4272         if selfA._toStore("CurrentOptimum"):
4273             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4274         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4275             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4276         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4277             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4278         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4279             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4280         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4281             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4282         return J
4283     #
4284     def GradientOfCostFunction(v):
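             # As coded: GradJb = B^T v and GradJo = - H'^T R^{-1} (Y - H(Xb + B v)),
             # the adjoint operator Ha being applied at the current state.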
4285         _V = v.reshape((-1,1))
4286         _X = Xb + (B @ _V).reshape((-1,1))
4287         _HX     = Hm( _X ).reshape((-1,1))
4288         GradJb  = BT * _V
4289         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
4290         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4291         return GradJ
4292     #
4293     # Minimization of the functional
4294     # --------------------------------
4295     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
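         # Number of cost function values already stored, so that the search for
         # the optimum below is restricted to the current minimization.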
4296     #
4297     if selfA._parameters["Minimizer"] == "LBFGSB":
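             # For SciPy versions in [0.19, 1.1.0], the locally available "lbfgsbhlt"
             # implementation of fmin_l_bfgs_b is used instead of the SciPy one; the
             # "factr" argument below is CostDecrementTolerance expressed in SciPy's
             # convention, i.e. in units of machine precision.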
4298         if "0.19" <= scipy.version.version <= "1.1.0":
4299             import lbfgsbhlt as optimiseur
4300         else:
4301             import scipy.optimize as optimiseur
4302         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4303             func        = CostFunction,
4304             x0          = Xini,
4305             fprime      = GradientOfCostFunction,
4306             args        = (),
4307             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4308             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
4309             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
4310             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4311             iprint      = selfA._parameters["optiprint"],
4312             )
4313         nfeval = Informations['funcalls']
4314         rc     = Informations['warnflag']
4315     elif selfA._parameters["Minimizer"] == "TNC":
4316         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4317             func        = CostFunction,
4318             x0          = Xini,
4319             fprime      = GradientOfCostFunction,
4320             args        = (),
4321             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4322             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
4323             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4324             ftol        = selfA._parameters["CostDecrementTolerance"],
4325             messages    = selfA._parameters["optmessages"],
4326             )
4327     elif selfA._parameters["Minimizer"] == "CG":
4328         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4329             f           = CostFunction,
4330             x0          = Xini,
4331             fprime      = GradientOfCostFunction,
4332             args        = (),
4333             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4334             gtol        = selfA._parameters["GradientNormTolerance"],
4335             disp        = selfA._parameters["optdisp"],
4336             full_output = True,
4337             )
4338     elif selfA._parameters["Minimizer"] == "NCG":
4339         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4340             f           = CostFunction,
4341             x0          = Xini,
4342             fprime      = GradientOfCostFunction,
4343             args        = (),
4344             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4345             avextol     = selfA._parameters["CostDecrementTolerance"],
4346             disp        = selfA._parameters["optdisp"],
4347             full_output = True,
4348             )
4349     elif selfA._parameters["Minimizer"] == "BFGS":
4350         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4351             f           = CostFunction,
4352             x0          = Xini,
4353             fprime      = GradientOfCostFunction,
4354             args        = (),
4355             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4356             gtol        = selfA._parameters["GradientNormTolerance"],
4357             disp        = selfA._parameters["optdisp"],
4358             full_output = True,
4359             )
4360     else:
4361         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4362     #
4363     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4364     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4365     #
4366     # Correction to work around a TNC bug on the returned Minimum
4367     # ----------------------------------------------------------------
4368     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4369         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4370     else:
4371         Minimum = Xb + B * Minimum.reshape((-1,1)) # Deliberately '*' and not '@'
4372     #
4373     Xa = Minimum
4374     #--------------------------
4375     #
4376     selfA.StoredVariables["Analysis"].store( Xa )
4377     #
4378     if selfA._toStore("OMA") or \
4379         selfA._toStore("SigmaObs2") or \
4380         selfA._toStore("SimulationQuantiles") or \
4381         selfA._toStore("SimulatedObservationAtOptimum"):
4382         if selfA._toStore("SimulatedObservationAtCurrentState"):
4383             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4384         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4385             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4386         else:
4387             HXa = Hm( Xa )
4388     #
4389     if selfA._toStore("APosterioriCovariance") or \
4390         selfA._toStore("SimulationQuantiles") or \
4391         selfA._toStore("JacobianMatrixAtOptimum") or \
4392         selfA._toStore("KalmanGainAtOptimum"):
4393         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4394         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4395     if selfA._toStore("APosterioriCovariance") or \
4396         selfA._toStore("SimulationQuantiles") or \
4397         selfA._toStore("KalmanGainAtOptimum"):
4398         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4399         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4400     if selfA._toStore("APosterioriCovariance") or \
4401         selfA._toStore("SimulationQuantiles"):
4402         BI = B.getI()
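             # A posteriori covariance estimated from the Hessian of the cost
             # function at the optimum, using the explicit tangent and adjoint matrices.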
4403         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4404     if selfA._toStore("APosterioriCovariance"):
4405         selfA.StoredVariables["APosterioriCovariance"].store( A )
4406     if selfA._toStore("JacobianMatrixAtOptimum"):
4407         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4408     if selfA._toStore("KalmanGainAtOptimum"):
4409         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4410         elif (Y.size >  Xb.size): KG = (B.getI() + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4411         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4412     #
4413     # Additional computations and/or storage
4414     # ---------------------------------------
4415     if selfA._toStore("Innovation") or \
4416         selfA._toStore("SigmaObs2") or \
4417         selfA._toStore("MahalanobisConsistency") or \
4418         selfA._toStore("OMB"):
4419         d  = Y - HXb
4420     if selfA._toStore("Innovation"):
4421         selfA.StoredVariables["Innovation"].store( d )
4422     if selfA._toStore("BMA"):
4423         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4424     if selfA._toStore("OMA"):
4425         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4426     if selfA._toStore("OMB"):
4427         selfA.StoredVariables["OMB"].store( d )
4428     if selfA._toStore("SigmaObs2"):
4429         TraceR = R.trace(Y.size)
4430         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
4431     if selfA._toStore("MahalanobisConsistency"):
4432         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4433     if selfA._toStore("SimulationQuantiles"):
4434         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4435     if selfA._toStore("SimulatedObservationAtBackground"):
4436         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4437     if selfA._toStore("SimulatedObservationAtOptimum"):
4438         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4439     #
4440     return 0
4441
4442 # ==============================================================================
4443 if __name__ == "__main__":
4444     print('\n AUTODIAGNOSTIC\n')