1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.ravel( X ).reshape((-1,1))
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue des DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
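    # NB : minimal usage sketch (indicative only), assuming a user-defined
    # function "simulator" mapping a 1D numpy array to a 1D numpy array, and
    # hypothetical vectors X0, dX0, Y0 of consistent sizes :
    #
    #   FDA = FDApproximation( Function = simulator, increment = 0.01, centeredDF = True )
    #   HX  = FDA.DirectOperator( X0 )            # H(X0)
    #   J   = FDA.TangentMatrix( X0 )             # finite-difference Jacobian at X0
    #   JdX = FDA.TangentOperator( (X0, dX0) )    # J @ dX0
    #   JtY = FDA.AdjointOperator( (X0, Y0) )     # J.T @ Y0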
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             reducingMemoryUse     = False,
70             avoidingRedundancy    = True,
71             toleranceInRedundancy = 1.e-18,
72             lenghtOfRedundancy    = -1,
73             mpEnabled             = False,
74             mpWorkers             = None,
75             mfEnabled             = False,
76             ):
77         self.__name = str(name)
78         self.__extraArgs = extraArguments
79         #
80         if mpEnabled:
81             try:
82                 import multiprocessing
83                 self.__mpEnabled = True
84             except ImportError:
85                 self.__mpEnabled = False
86         else:
87             self.__mpEnabled = False
88         self.__mpWorkers = mpWorkers
89         if self.__mpWorkers is not None and self.__mpWorkers < 1:
90             self.__mpWorkers = None
91         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92         #
93         self.__mfEnabled = bool(mfEnabled)
94         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
95         #
96         self.__rmEnabled = bool(reducingMemoryUse)
97         logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
98         #
99         if avoidingRedundancy:
100             self.__avoidRC = True
101             self.__tolerBP = float(toleranceInRedundancy)
102             self.__lenghtRJ = int(lenghtOfRedundancy)
103             self.__listJPCP = [] # Jacobian Previous Calculated Points
104             self.__listJPCI = [] # Jacobian Previous Calculated Increment
105             self.__listJPCR = [] # Jacobian Previous Calculated Results
106             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
107             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
108         else:
109             self.__avoidRC = False
110         logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
111         if self.__avoidRC:
112             logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
113         #
114         if self.__mpEnabled:
115             if isinstance(Function,types.FunctionType):
116                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
117                 self.__userFunction__name = Function.__name__
118                 try:
119                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
120                 except:
121                     mod = os.path.abspath(Function.__globals__['__file__'])
122                 if not os.path.isfile(mod):
123                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
124                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
125                 self.__userFunction__path = os.path.dirname(mod)
126                 del mod
127                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
128                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
129             elif isinstance(Function,types.MethodType):
130                 logging.debug("FDA Calculs en multiprocessing : MethodType")
131                 self.__userFunction__name = Function.__name__
132                 try:
133                     mod = os.path.join(Function.__func__.__globals__['filepath'],Function.__func__.__globals__['filename'])
134                 except:
135                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
136                 if not os.path.isfile(mod):
137                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
138                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
139                 self.__userFunction__path = os.path.dirname(mod)
140                 del mod
141                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
143             else:
144                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
145         else:
146             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
147             self.__userFunction = self.__userOperator.appliedTo
148         #
149         self.__centeredDF = bool(centeredDF)
150         if abs(float(increment)) > 1.e-15:
151             self.__increment  = float(increment)
152         else:
153             self.__increment  = 0.01
154         if dX is None:
155             self.__dX     = None
156         else:
157             self.__dX     = numpy.ravel( dX )
158
159     # ---------------------------------------------------------
160     def __doublon__(self, e, l, n, v=None):
161         __ac, __iac = False, -1
162         for i in range(len(l)-1,-1,-1):
163             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
164                 __ac, __iac = True, i
165                 if v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(v,__iac))
166                 break
167         return __ac, __iac
168
169     # ---------------------------------------------------------
170     def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
171         "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
172         if not isinstance(__LMatrix, (list,tuple)):
173             raise TypeError("Columnwise list matrix does not have the proper type: %s"%type(__LMatrix))
174         if __dotWith is not None:
175             __Idwx = numpy.ravel( __dotWith )
176             assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
177             __Produit = numpy.zeros(__LMatrix[0].size)
178             for i, col in enumerate(__LMatrix):
179                 __Produit += float(__Idwx[i]) * col
180             return __Produit
181         elif __dotTWith is not None:
182             _Idwy = numpy.ravel( __dotTWith ).T
183             assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
184             __Produit = numpy.zeros(len(__LMatrix))
185             for i, col in enumerate(__LMatrix):
186                 __Produit[i] = float( _Idwy @ col)
187             return __Produit
188         else:
189             __Produit = None
190         return __Produit
191
192     # ---------------------------------------------------------
193     def DirectOperator(self, X, **extraArgs ):
194         """
195         Calcul du direct à l'aide de la fonction fournie.
196
197         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
198         ne doivent pas être données ici à la fonction utilisateur.
199         """
200         logging.debug("FDA Calcul DirectOperator (explicite)")
201         if self.__mfEnabled:
202             _HX = self.__userFunction( X, argsAsSerie = True )
203         else:
204             _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
205         #
206         return _HX
207
208     # ---------------------------------------------------------
209     def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
210         """
211         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
212         c'est-à-dire le gradient de H en X. On utilise des différences finies
213         directionnelles autour du point X. X est un numpy.ndarray.
214
215         Différences finies centrées (approximation d'ordre 2):
216         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
217            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
218            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
219            H( X_moins_dXi )
220         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
221            le pas 2*dXi
222         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
223
224         Différences finies non centrées (approximation d'ordre 1):
225         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
226            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
227            HX_plus_dXi = H( X_plus_dXi )
228         2/ On calcule la valeur centrale HX = H(X)
229         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
230            le pas dXi
231         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
232
233         """
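        # NB : with the notation above, the i-th column of the Jacobian is
        # approximated by
        #   centered     : J[:,i] ≈ ( H(X + dXi*e_i) - H(X - dXi*e_i) ) / (2*dXi)
        #   non centered : J[:,i] ≈ ( H(X + dXi*e_i) - H(X) ) / dXi
        # where e_i is the i-th canonical basis vector and, unless dX is given,
        # dXi = increment * X[i].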
234         logging.debug("FDA Début du calcul de la Jacobienne")
235         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
236         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
237         #
238         if X is None or len(X)==0:
239             raise ValueError("Nominal point X for approximate derivatives cannot be None or empty (given X: %s)."%(str(X),))
240         #
241         _X = numpy.ravel( X )
242         #
243         if self.__dX is None:
244             _dX  = self.__increment * _X
245         else:
246             _dX = numpy.ravel( self.__dX )
247         assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
248         assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
249         #
250         if (_dX == 0.).any():
251             moyenne = _dX.mean()
252             if moyenne == 0.:
253                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
254             else:
255                 _dX = numpy.where( _dX == 0., moyenne, _dX )
256         #
257         __alreadyCalculated  = False
258         if self.__avoidRC:
259             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
260             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
261             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
262                 __alreadyCalculated, __i = True, __alreadyCalculatedP
263                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
264         #
265         if __alreadyCalculated:
266             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
267             _Jacobienne = self.__listJPCR[__i]
268             logging.debug("FDA Fin du calcul de la Jacobienne")
269             if dotWith is not None:
270                 return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
271             elif dotTWith is not None:
272                 return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
273         else:
274             logging.debug("FDA   Calcul Jacobienne (explicite)")
275             if self.__centeredDF:
276                 #
277                 if self.__mpEnabled and not self.__mfEnabled:
278                     funcrepr = {
279                         "__userFunction__path" : self.__userFunction__path,
280                         "__userFunction__modl" : self.__userFunction__modl,
281                         "__userFunction__name" : self.__userFunction__name,
282                     }
283                     _jobs = []
284                     for i in range( len(_dX) ):
285                         _dXi            = _dX[i]
286                         _X_plus_dXi     = numpy.array( _X, dtype=float )
287                         _X_plus_dXi[i]  = _X[i] + _dXi
288                         _X_moins_dXi    = numpy.array( _X, dtype=float )
289                         _X_moins_dXi[i] = _X[i] - _dXi
290                         #
291                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
292                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
293                     #
294                     import multiprocessing
295                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
296                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
297                     self.__pool.close()
298                     self.__pool.join()
299                     #
300                     _Jacobienne  = []
301                     for i in range( len(_dX) ):
302                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
303                     #
304                 elif self.__mfEnabled:
305                     _xserie = []
306                     for i in range( len(_dX) ):
307                         _dXi            = _dX[i]
308                         _X_plus_dXi     = numpy.array( _X, dtype=float )
309                         _X_plus_dXi[i]  = _X[i] + _dXi
310                         _X_moins_dXi    = numpy.array( _X, dtype=float )
311                         _X_moins_dXi[i] = _X[i] - _dXi
312                         #
313                         _xserie.append( _X_plus_dXi )
314                         _xserie.append( _X_moins_dXi )
315                     #
316                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
317                     #
318                     _Jacobienne  = []
319                     for i in range( len(_dX) ):
320                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
321                     #
322                 else:
323                     _Jacobienne  = []
324                     for i in range( _dX.size ):
325                         _dXi            = _dX[i]
326                         _X_plus_dXi     = numpy.array( _X, dtype=float )
327                         _X_plus_dXi[i]  = _X[i] + _dXi
328                         _X_moins_dXi    = numpy.array( _X, dtype=float )
329                         _X_moins_dXi[i] = _X[i] - _dXi
330                         #
331                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
332                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
333                         #
334                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
335                 #
336             else:
337                 #
338                 if self.__mpEnabled and not self.__mfEnabled:
339                     funcrepr = {
340                         "__userFunction__path" : self.__userFunction__path,
341                         "__userFunction__modl" : self.__userFunction__modl,
342                         "__userFunction__name" : self.__userFunction__name,
343                     }
344                     _jobs = []
345                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
346                     for i in range( len(_dX) ):
347                         _X_plus_dXi    = numpy.array( _X, dtype=float )
348                         _X_plus_dXi[i] = _X[i] + _dX[i]
349                         #
350                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
351                     #
352                     import multiprocessing
353                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
354                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
355                     self.__pool.close()
356                     self.__pool.join()
357                     #
358                     _HX = _HX_plus_dX.pop(0)
359                     #
360                     _Jacobienne = []
361                     for i in range( len(_dX) ):
362                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
363                     #
364                 elif self.__mfEnabled:
365                     _xserie = []
366                     _xserie.append( _X )
367                     for i in range( len(_dX) ):
368                         _X_plus_dXi    = numpy.array( _X, dtype=float )
369                         _X_plus_dXi[i] = _X[i] + _dX[i]
370                         #
371                         _xserie.append( _X_plus_dXi )
372                     #
373                     _HX_plus_dX = self.DirectOperator( _xserie )
374                     #
375                     _HX = _HX_plus_dX.pop(0)
376                     #
377                     _Jacobienne = []
378                     for i in range( len(_dX) ):
379                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
380                     #
381                 else:
382                     _Jacobienne  = []
383                     _HX = self.DirectOperator( _X )
384                     for i in range( _dX.size ):
385                         _dXi            = _dX[i]
386                         _X_plus_dXi     = numpy.array( _X, dtype=float )
387                         _X_plus_dXi[i]  = _X[i] + _dXi
388                         #
389                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
390                         #
391                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
392             #
393             if (dotWith is not None) or (dotTWith is not None):
394                 __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
395             else:
396                 __Produit = None
397             if __Produit is None or self.__avoidRC:
398                 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
399                 if self.__avoidRC:
400                     if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
401                     while len(self.__listJPCP) > self.__lenghtRJ:
402                         self.__listJPCP.pop(0)
403                         self.__listJPCI.pop(0)
404                         self.__listJPCR.pop(0)
405                         self.__listJPPN.pop(0)
406                         self.__listJPIN.pop(0)
407                     self.__listJPCP.append( copy.copy(_X) )
408                     self.__listJPCI.append( copy.copy(_dX) )
409                     self.__listJPCR.append( copy.copy(_Jacobienne) )
410                     self.__listJPPN.append( numpy.linalg.norm(_X) )
411                     self.__listJPIN.append( numpy.linalg.norm(_dX) )
412             logging.debug("FDA Fin du calcul de la Jacobienne")
413             if __Produit is not None:
414                 return __Produit
415         #
416         return _Jacobienne
417
418     # ---------------------------------------------------------
419     def TangentOperator(self, paire, **extraArgs ):
420         """
421         Calcul du tangent à l'aide de la Jacobienne.
422
423         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
424         ne doivent pas être données ici à la fonction utilisateur.
425         """
426         if self.__mfEnabled:
427             assert len(paire) == 1, "Incorrect length of arguments"
428             _paire = paire[0]
429             assert len(_paire) == 2, "Incorrect number of arguments"
430         else:
431             assert len(paire) == 2, "Incorrect number of arguments"
432             _paire = paire
433         X, dX = _paire
434         if dX is None or len(dX) == 0:
435             #
436             # Calcul de la forme matricielle si le second argument est None
437             # -------------------------------------------------------------
438             _Jacobienne = self.TangentMatrix( X )
439             if self.__mfEnabled: return [_Jacobienne,]
440             else:                return _Jacobienne
441         else:
442             #
443             # Calcul de la valeur linéarisée de H en X appliqué à dX
444             # ------------------------------------------------------
445             _HtX = self.TangentMatrix( X, dotWith = dX )
446             if self.__mfEnabled: return [_HtX,]
447             else:                return _HtX
448
449     # ---------------------------------------------------------
450     def AdjointOperator(self, paire, **extraArgs ):
451         """
452         Calcul de l'adjoint à l'aide de la Jacobienne.
453
454         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
455         ne doivent pas être données ici à la fonction utilisateur.
456         """
457         if self.__mfEnabled:
458             assert len(paire) == 1, "Incorrect length of arguments"
459             _paire = paire[0]
460             assert len(_paire) == 2, "Incorrect number of arguments"
461         else:
462             assert len(paire) == 2, "Incorrect number of arguments"
463             _paire = paire
464         X, Y = _paire
465         if Y is None or len(Y) == 0:
466             #
467             # Calcul de la forme matricielle si le second argument est None
468             # -------------------------------------------------------------
469             _JacobienneT = self.TangentMatrix( X ).T
470             if self.__mfEnabled: return [_JacobienneT,]
471             else:                return _JacobienneT
472         else:
473             #
474             # Calcul de la valeur de l'adjoint en X appliqué à Y
475             # --------------------------------------------------
476             _HaY = self.TangentMatrix( X, dotTWith = Y )
477             if self.__mfEnabled: return [_HaY,]
478             else:                return _HaY
479
480 # ==============================================================================
481 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
482     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
483     #
484     _bgcenter = numpy.ravel(_bgcenter)[:,None]
485     if _nbmembers < 1:
486         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
487     #
488     if _bgcovariance is None:
489         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
490     else:
491         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
492         _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
493     #
494     return _Perturbations
495
496 # ==============================================================================
497 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
498     "Génération d'un ensemble de taille _nbmembers d'états aléatoires centrés"
499     def __CenteredRandomAnomalies(Zr, N):
500         """
501         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
502         notes manuscrites de MB et conforme au code de PS avec eps = -1
503         """
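        # NB : by construction the columns of Q sum to zero (1^T Q = 0), so the
        # N anomalies returned below have an exactly zero ensemble mean.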
504         eps = -1
505         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
506         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
507         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
508         Q = numpy.dot(Q,R)
509         Zr = numpy.dot(Q,Zr)
510         return Zr.T
511     #
512     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
513     if _nbmembers < 1:
514         raise ValueError("Number of members has to be at least 1 (given number: %s)."%(str(_nbmembers),))
515     if _bgcovariance is None:
516         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
517     else:
518         if _withSVD:
519             _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
520             _nbctl = _bgcenter.size
521             if _nbmembers > _nbctl:
522                 _Z = numpy.concatenate((numpy.dot(
523                     numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
524                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
525             else:
526                 _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
527             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
528             _Perturbations = _bgcenter + _Zca
529         else:
530             if max(abs(_bgcovariance.flatten())) > 0:
531                 _nbctl = _bgcenter.size
532                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
533                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
534                 _Perturbations = _bgcenter + _Zca
535             else:
536                 _Perturbations = numpy.tile( _bgcenter, _nbmembers)
537     #
538     return _Perturbations
539
540 # ==============================================================================
541 def EnsembleMean( __Ensemble ):
542     "Renvoie la moyenne empirique d'un ensemble"
543     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
544
545 # ==============================================================================
546 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
547     "Renvoie les anomalies centrées à partir d'un ensemble"
548     if __OptMean is None:
549         __Em = EnsembleMean( __Ensemble )
550     else:
551         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
552     #
553     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
554
555 # ==============================================================================
556 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
557     "Renvoie l'estimation empirique de la covariance d'ensemble"
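    # NB : both branches estimate P ≈ A A^T / (m-1), with A the matrix of centered
    # anomalies and m the number of members ; the second branch additionally
    # symmetrizes the result and adds a small diagonal term to enforce positivity.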
558     if __quick:
559         # Covariance rapide mais rarement définie positive
560         __Covariance = numpy.cov( __Ensemble )
561     else:
562         # Résultat souvent identique à numpy.cov, mais plus robuste
563         __n, __m = numpy.asarray( __Ensemble ).shape
564         __Anomalies = EnsembleOfAnomalies( __Ensemble )
565         # Estimation empirique
566         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
567         # Assure la symétrie
568         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
569         # Assure la positivité
570         __epsilon    = mpr*numpy.trace( __Covariance )
571         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
572     #
573     return __Covariance
574
575 # ==============================================================================
576 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
577     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
578     if hasattr(__Covariance,"assparsematrix"):
579         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
580             # Traitement d'une covariance nulle ou presque
581             return __Ensemble
582         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
583             # Traitement d'une covariance nulle ou presque
584             return __Ensemble
585     else:
586         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
587             # Traitement d'une covariance nulle ou presque
588             return __Ensemble
589         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
590             # Traitement d'une covariance nulle ou presque
591             return __Ensemble
592     #
593     __n, __m = __Ensemble.shape
594     if __Seed is not None: numpy.random.seed(__Seed)
595     #
596     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
597         # Traitement d'une covariance multiple de l'identité
598         __zero = 0.
599         __std  = numpy.sqrt(__Covariance.assparsematrix())
600         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
601     #
602     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
603         # Traitement d'une covariance diagonale avec variances non identiques
604         __zero = numpy.zeros(__n)
605         __std  = numpy.sqrt(__Covariance.assparsematrix())
606         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
607     #
608     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
609         # Traitement d'une covariance pleine
610         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
611     #
612     elif isinstance(__Covariance, numpy.ndarray):
613         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
614         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
615     #
616     else:
617         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
618     #
619     return __Ensemble
620
621 # ==============================================================================
622 def CovarianceInflation(
623         InputCovOrEns,
624         InflationType   = None,
625         InflationFactor = None,
626         BackgroundCov   = None,
627         ):
628     """
629     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
630
631     Synthèse : Hunt 2007, section 2.3.5
632     """
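    # NB : summary of the branches below (m : multiplicative factor, a : additive factor)
    #   multiplicative on covariance : P  <- m^2 * P
    #   multiplicative on anomalies  : Xi <- mean(X) + m * (Xi - mean(X))
    #   additive                     : P  <- (1 - a) * P + a * I
    #   hybrid (with background B)   : P  <- (1 - a) * P + a * B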
633     if InflationFactor is None:
634         return InputCovOrEns
635     else:
636         InflationFactor = float(InflationFactor)
637     #
638     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
639         if InflationFactor < 1.:
640             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
641         if InflationFactor < 1.+mpr:
642             return InputCovOrEns
643         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
644     #
645     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
646         if InflationFactor < 1.:
647             raise ValueError("Inflation factor for multiplicative inflation has to be greater than or equal to 1.")
648         if InflationFactor < 1.+mpr:
649             return InputCovOrEns
650         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
651         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
652             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
653     #
654     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
655         if InflationFactor < 0.:
656             raise ValueError("Inflation factor for additive inflation has to be greater than or equal to 0.")
657         if InflationFactor < mpr:
658             return InputCovOrEns
659         __n, __m = numpy.asarray(InputCovOrEns).shape
660         if __n != __m:
661             raise ValueError("Additive inflation can only be applied to a square (covariance) matrix.")
662         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
663     #
664     elif InflationType == "HybridOnBackgroundCovariance":
665         if InflationFactor < 0.:
666             raise ValueError("Inflation factor for hybrid inflation has to be greater than or equal to 0.")
667         if InflationFactor < mpr:
668             return InputCovOrEns
669         __n, __m = numpy.asarray(InputCovOrEns).shape
670         if __n != __m:
671             raise ValueError("Hybrid inflation can only be applied to a square (covariance) matrix.")
672         if BackgroundCov is None:
673             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
674         if InputCovOrEns.shape != BackgroundCov.shape:
675             raise ValueError("Ensemble covariance matrix has to be of the same size as the background covariance matrix B.")
676         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
677     #
678     elif InflationType == "Relaxation":
679         raise NotImplementedError("InflationType Relaxation")
680     #
681     else:
682         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
683     #
684     return OutputCovOrEns
685
686 # ==============================================================================
687 def HessienneEstimation(nb, HaM, HtM, BI, RI):
688     "Estimation de la Hessienne"
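    # NB : the loop below assembles, one basis vector at a time, the Gauss-Newton
    # Hessian H_J = B^{-1} + Ht^T R^{-1} Ht (BI, RI : inverses of B and R ; HtM,
    # HaM : tangent and adjoint operators), and A is estimated as its inverse.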
689     #
690     HessienneI = []
691     for i in range(int(nb)):
692         _ee    = numpy.zeros((nb,1))
693         _ee[i] = 1.
694         _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
695         HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
696     #
697     A = numpy.linalg.inv(numpy.array( HessienneI ))
698     #
699     if min(A.shape) != max(A.shape):
700         raise ValueError("The a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(str(A.shape),))
701     if (numpy.diag(A) < 0).any():
702         raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
703     if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug
704         try:
705             L = numpy.linalg.cholesky( A )
706         except:
707             raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
708     #
709     return A
710
711 # ==============================================================================
712 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
713     "Estimation des quantiles a posteriori (selfA est modifié)"
714     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
715     #
716     # Traitement des bornes
717     if "StateBoundsForQuantiles" in selfA._parameters:
718         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
719     elif "Bounds" in selfA._parameters:
720         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
721     else:
722         LBounds = None
723     if LBounds is not None:
724         LBounds = ForceNumericBounds( LBounds )
725     _Xa = numpy.ravel(Xa)
726     #
727     # Échantillonnage des états
728     YfQ  = None
729     EXr  = None
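    # NB : each draw below samples around Xa with the a posteriori covariance A ;
    # the "Linear" variant propagates the increment with the tangent operator
    # (Yr = HXa + HtM @ dXr), the "NonLinear" one re-evaluates Hm on the sample.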
730     for i in range(nbsamples):
731         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
732             dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
733             if LBounds is not None: # "EstimateProjection" par défaut
734                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
735                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
736             dYr = HtM @ dXr
737             Yr = HXa.reshape((-1,1)) + dYr
738             if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
739         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
740             Xr = numpy.random.multivariate_normal(_Xa,A)
741             if LBounds is not None: # "EstimateProjection" par défaut
742                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
743                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
744             Yr = numpy.asarray(Hm( Xr ))
745         else:
746             raise ValueError("Quantile simulations can only be of type Linear or NonLinear.")
747         #
748         if YfQ is None:
749             YfQ = Yr.reshape((-1,1))
750             if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
751         else:
752             YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
753             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
754     #
755     # Extraction des quantiles
756     YfQ.sort(axis=-1)
757     YQ = None
758     for quantile in selfA._parameters["Quantiles"]:
759         if not (0. <= float(quantile) <= 1.): continue
760         indice = int(nbsamples * float(quantile) - 1./nbsamples)
761         if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
762         else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
763     if YQ is not None: # Liste non vide de quantiles
764         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
765     if selfA._toStore("SampledStateForQuantiles"):
766         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
767     #
768     return 0
769
770 # ==============================================================================
771 def ForceNumericBounds( __Bounds ):
772     "Force les bornes à être des valeurs numériques, sauf si globalement None"
773     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
774     if __Bounds is None: return None
775     # Converti toutes les bornes individuelles None à +/- l'infini
776     __Bounds = numpy.asarray( __Bounds, dtype=float )
777     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
778         raise ValueError("Incorrectly shaped bounds data")
779     __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
780     __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
781     return __Bounds
782
783 # ==============================================================================
784 def RecentredBounds( __Bounds, __Center):
785     "Recentre les bornes autour de 0, sauf si globalement None"
786     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
787     if __Bounds is None: return None
788     # Recentre les valeurs numériques de bornes
789     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
790
791 # ==============================================================================
792 def ApplyBounds( __Vector, __Bounds, __newClip = True):
793     "Applique des bornes numériques à un point"
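    # NB : indicative example with hypothetical values : __Bounds is an (n, 2)
    # array of [lower, upper] rows, e.g. numpy.array([[0., 1.], [-5., 5.]]) for
    # n = 2 ; with __newClip True, the vector is simply clipped component-wise.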
794     # Conserve une valeur par défaut s'il n'y a pas de bornes
795     if __Bounds is None: return __Vector
796     #
797     if not isinstance(__Vector, numpy.ndarray): # Is an array
798         raise ValueError("Incorrect array definition of vector data")
799     if not isinstance(__Bounds, numpy.ndarray): # Is an array
800         raise ValueError("Incorrect array definition of bounds data")
801     if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
802         raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
803     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
804         raise ValueError("Incorrectly shaped bounds data")
805     #
806     if __newClip:
807         __Vector = __Vector.clip(
808             __Bounds[:,0].reshape(__Vector.shape),
809             __Bounds[:,1].reshape(__Vector.shape),
810             )
811     else:
812         __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
813         __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
814         __Vector = numpy.asarray(__Vector)
815     #
816     return __Vector
817
818 # ==============================================================================
819 def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
820     "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
821     #
822     Xf = EnsembleMean( __EnXf )
823     Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
824     Pf = (1 - __Betaf) * __B + __Betaf * Pf
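    # NB : Pf above is the hybrid covariance (1 - Betaf) * B + Betaf * Pf_ensemble,
    # blending the static background B with the ensemble estimate before the 3DVAR step.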
825     #
826     selfB = PartialAlgorithm("3DVAR")
827     selfB._parameters["Minimizer"] = "LBFGSB"
828     selfB._parameters["MaximumNumberOfSteps"] = 15000
829     selfB._parameters["CostDecrementTolerance"] = 1.e-7
830     selfB._parameters["ProjectedGradientTolerance"] = -1
831     selfB._parameters["GradientNormTolerance"] = 1.e-05
832     selfB._parameters["StoreInternalVariables"] = False
833     selfB._parameters["optiprint"] = -1
834     selfB._parameters["optdisp"] = 0
835     selfB._parameters["Bounds"] = None
836     selfB._parameters["InitializationPoint"] = Xf
837     std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
838     Xa = selfB.get("Analysis")[-1].reshape((-1,1))
839     del selfB
840     #
841     return Xa + EnsembleOfAnomalies( __EnXn )
842
843 # ==============================================================================
844 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
845     """
846     Constrained Unscented Kalman Filter
847     """
848     if selfA._parameters["EstimationOf"] == "Parameters":
849         selfA._parameters["StoreInternalVariables"] = True
850     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
851     #
852     L     = Xb.size
853     Alpha = selfA._parameters["Alpha"]
854     Beta  = selfA._parameters["Beta"]
855     if selfA._parameters["Kappa"] == 0:
856         if selfA._parameters["EstimationOf"] == "State":
857             Kappa = 0
858         elif selfA._parameters["EstimationOf"] == "Parameters":
859             Kappa = 3 - L
860     else:
861         Kappa = selfA._parameters["Kappa"]
862     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
863     Gamma  = math.sqrt( L + Lambda )
864     #
865     Ww = []
866     Ww.append( 0. )
867     for i in range(2*L):
868         Ww.append( 1. / (2.*(L + Lambda)) )
869     #
870     Wm = numpy.array( Ww )
871     Wm[0] = Lambda / (L + Lambda)
872     Wc = numpy.array( Ww )
873     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
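    # NB : standard unscented-transform weights, consistent with the lines above :
    #   Lambda = Alpha^2 * (L + Kappa) - L,   Gamma = sqrt(L + Lambda),
    #   Wm[0]  = Lambda / (L + Lambda),       Wc[0] = Wm[0] + (1 - Alpha^2 + Beta),
    #   Wm[i]  = Wc[i] = 1 / (2 * (L + Lambda)) for i >= 1.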
874     #
875     # Opérateurs
876     Hm = HO["Direct"].appliedControledFormTo
877     #
878     if selfA._parameters["EstimationOf"] == "State":
879         Mm = EM["Direct"].appliedControledFormTo
880     #
881     if CM is not None and "Tangent" in CM and U is not None:
882         Cm = CM["Tangent"].asMatrix(Xb)
883     else:
884         Cm = None
885     #
886     # Durée d'observation et tailles
887     if hasattr(Y,"stepnumber"):
888         duration = Y.stepnumber()
889         __p = numpy.cumprod(Y.shape())[-1]
890     else:
891         duration = 2
892         __p = numpy.array(Y).size
893     #
894     # Précalcul des inversions de B et R
895     if selfA._parameters["StoreInternalVariables"] \
896         or selfA._toStore("CostFunctionJ") \
897         or selfA._toStore("CostFunctionJb") \
898         or selfA._toStore("CostFunctionJo") \
899         or selfA._toStore("CurrentOptimum") \
900         or selfA._toStore("APosterioriCovariance"):
901         BI = B.getI()
902         RI = R.getI()
903     #
904     __n = Xb.size
905     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
906     #
907     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
908         Xn = Xb
909         if hasattr(B,"asfullmatrix"):
910             Pn = B.asfullmatrix(__n)
911         else:
912             Pn = B
913         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
914         selfA.StoredVariables["Analysis"].store( Xb )
915         if selfA._toStore("APosterioriCovariance"):
916             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
917     elif selfA._parameters["nextStep"]:
918         Xn = selfA._getInternalState("Xn")
919         Pn = selfA._getInternalState("Pn")
920     #
921     if selfA._parameters["EstimationOf"] == "Parameters":
922         XaMin            = Xn
923         previousJMinimum = numpy.finfo(float).max
924     #
925     for step in range(duration-1):
926         if hasattr(Y,"store"):
927             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
928         else:
929             Ynpu = numpy.ravel( Y ).reshape((__p,1))
930         #
931         if U is not None:
932             if hasattr(U,"store") and len(U)>1:
933                 Un = numpy.ravel( U[step] ).reshape((-1,1))
934             elif hasattr(U,"store") and len(U)==1:
935                 Un = numpy.ravel( U[0] ).reshape((-1,1))
936             else:
937                 Un = numpy.ravel( U ).reshape((-1,1))
938         else:
939             Un = None
940         #
941         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
942         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
943         nbSpts = 2*Xn.size+1
944         #
945         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
946             for point in range(nbSpts):
947                 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
948         #
949         XEtnnp = []
950         for point in range(nbSpts):
951             if selfA._parameters["EstimationOf"] == "State":
952                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
953                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
954                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
955                     XEtnnpi = XEtnnpi + Cm @ Un
956                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
957                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
958             elif selfA._parameters["EstimationOf"] == "Parameters":
959                 # --- > Par principe, M = Id, Q = 0
960                 XEtnnpi = Xnp[:,point]
961             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
962         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
963         #
964         Xncm = ( XEtnnp * Wm ).sum(axis=1)
965         #
966         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
967             Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
968         #
969         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
970         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
971         for point in range(nbSpts):
972             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
973         #
974         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
975             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
976         else:
977             Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
978         #
979         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
980         #
981         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
982             for point in range(nbSpts):
983                 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
984         #
985         Ynnp = []
986         for point in range(nbSpts):
987             if selfA._parameters["EstimationOf"] == "State":
988                 Ynnpi = Hm( (Xnnp[:,point], None) )
989             elif selfA._parameters["EstimationOf"] == "Parameters":
990                 Ynnpi = Hm( (Xnnp[:,point], Un) )
991             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
992         Ynnp = numpy.concatenate( Ynnp, axis=1 )
993         #
994         Yncm = ( Ynnp * Wm ).sum(axis=1)
995         #
996         Pyyn = R
997         Pxyn = 0.
998         for point in range(nbSpts):
999             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
1000             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
1001         #
1002         _Innovation  = Ynpu - Yncm.reshape((-1,1))
1003         if selfA._parameters["EstimationOf"] == "Parameters":
1004             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1005                 _Innovation = _Innovation - Cm @ Un
1006         #
1007         Kn = Pxyn * Pyyn.I
1008         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
1009         Pn = Pnm - Kn * Pyyn * Kn.T
1010         #
1011         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1012             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1013         #
1014         Xa = Xn # Pointeurs
1015         #--------------------------
1016         selfA._setInternalState("Xn", Xn)
1017         selfA._setInternalState("Pn", Pn)
1018         #--------------------------
1019         #
1020         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1021         # ---> avec analysis
1022         selfA.StoredVariables["Analysis"].store( Xa )
1023         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1024             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
1025         if selfA._toStore("InnovationAtCurrentAnalysis"):
1026             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1027         # ---> avec current state
1028         if selfA._parameters["StoreInternalVariables"] \
1029             or selfA._toStore("CurrentState"):
1030             selfA.StoredVariables["CurrentState"].store( Xn )
1031         if selfA._toStore("ForecastState"):
1032             selfA.StoredVariables["ForecastState"].store( Xncm )
1033         if selfA._toStore("ForecastCovariance"):
1034             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
1035         if selfA._toStore("BMA"):
1036             selfA.StoredVariables["BMA"].store( Xncm - Xa )
1037         if selfA._toStore("InnovationAtCurrentState"):
1038             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1039         if selfA._toStore("SimulatedObservationAtCurrentState") \
1040             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1041             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
1042         # ---> autres
1043         if selfA._parameters["StoreInternalVariables"] \
1044             or selfA._toStore("CostFunctionJ") \
1045             or selfA._toStore("CostFunctionJb") \
1046             or selfA._toStore("CostFunctionJo") \
1047             or selfA._toStore("CurrentOptimum") \
1048             or selfA._toStore("APosterioriCovariance"):
1049             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1050             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1051             J   = Jb + Jo
1052             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1053             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1054             selfA.StoredVariables["CostFunctionJ" ].store( J )
1055             #
1056             if selfA._toStore("IndexOfOptimum") \
1057                 or selfA._toStore("CurrentOptimum") \
1058                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1059                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1060                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1061                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1062                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1063             if selfA._toStore("IndexOfOptimum"):
1064                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1065             if selfA._toStore("CurrentOptimum"):
1066                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1067             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1068                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1069             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1070                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1071             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1072                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1073             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1074                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1075         if selfA._toStore("APosterioriCovariance"):
1076             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1077         if selfA._parameters["EstimationOf"] == "Parameters" \
1078             and J < previousJMinimum:
1079             previousJMinimum    = J
1080             XaMin               = Xa
1081             if selfA._toStore("APosterioriCovariance"):
1082                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1083     #
1084     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1085     # ----------------------------------------------------------------------
1086     if selfA._parameters["EstimationOf"] == "Parameters":
1087         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1088         selfA.StoredVariables["Analysis"].store( XaMin )
1089         if selfA._toStore("APosterioriCovariance"):
1090             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1091         if selfA._toStore("BMA"):
1092             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1093     #
1094     return 0
1095
1096 # ==============================================================================
1097 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1098     """
1099     Constrained Extended Kalman Filter
1100     """
1101     if selfA._parameters["EstimationOf"] == "Parameters":
1102         selfA._parameters["StoreInternalVariables"] = True
1103     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1104     #
1105     # Opérateurs
1106     H = HO["Direct"].appliedControledFormTo
1107     #
1108     if selfA._parameters["EstimationOf"] == "State":
1109         M = EM["Direct"].appliedControledFormTo
1110     #
1111     if CM is not None and "Tangent" in CM and U is not None:
1112         Cm = CM["Tangent"].asMatrix(Xb)
1113     else:
1114         Cm = None
1115     #
1116     # Durée d'observation et tailles
1117     if hasattr(Y,"stepnumber"):
1118         duration = Y.stepnumber()
1119         __p = numpy.cumprod(Y.shape())[-1]
1120     else:
1121         duration = 2
1122         __p = numpy.array(Y).size
1123     #
1124     # Précalcul des inversions de B et R
1125     if selfA._parameters["StoreInternalVariables"] \
1126         or selfA._toStore("CostFunctionJ") \
1127         or selfA._toStore("CostFunctionJb") \
1128         or selfA._toStore("CostFunctionJo") \
1129         or selfA._toStore("CurrentOptimum") \
1130         or selfA._toStore("APosterioriCovariance"):
1131         BI = B.getI()
1132         RI = R.getI()
1133     #
1134     __n = Xb.size
1135     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1136     #
1137     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1138         Xn = Xb
1139         Pn = B
1140         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1141         selfA.StoredVariables["Analysis"].store( Xb )
1142         if selfA._toStore("APosterioriCovariance"):
1143             if hasattr(B,"asfullmatrix"):
1144                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1145             else:
1146                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1147         selfA._setInternalState("seed", numpy.random.get_state())
1148     elif selfA._parameters["nextStep"]:
1149         Xn = selfA._getInternalState("Xn")
1150         Pn = selfA._getInternalState("Pn")
1151     #
1152     if selfA._parameters["EstimationOf"] == "Parameters":
1153         XaMin            = Xn
1154         previousJMinimum = numpy.finfo(float).max
1155     #
1156     for step in range(duration-1):
1157         if hasattr(Y,"store"):
1158             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1159         else:
1160             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1161         #
1162         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1163         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1164         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1165         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1166         #
1167         if selfA._parameters["EstimationOf"] == "State":
1168             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1169             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1170             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1171             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1172         #
1173         if U is not None:
1174             if hasattr(U,"store") and len(U)>1:
1175                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1176             elif hasattr(U,"store") and len(U)==1:
1177                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1178             else:
1179                 Un = numpy.ravel( U ).reshape((-1,1))
1180         else:
1181             Un = None
1182         #
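        # Constraint handling by projection: when "ConstrainedBy" is
        # "EstimateProjection", the state is clipped onto the user-given
        # bounds before the forecast, after the forecast and after the
        # analysis update.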
1183         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1184             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1185         #
1186         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1187             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1188             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1189                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1190                 Xn_predicted = Xn_predicted + Cm @ Un
1191             Pn_predicted = Q + Mt * (Pn * Ma)
1192         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1193             # --- > Par principe, M = Id, Q = 0
1194             Xn_predicted = Xn
1195             Pn_predicted = Pn
1196         #
1197         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1198             Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
1199         #
1200         if selfA._parameters["EstimationOf"] == "State":
1201             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1202             _Innovation  = Ynpu - HX_predicted
1203         elif selfA._parameters["EstimationOf"] == "Parameters":
1204             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1205             _Innovation  = Ynpu - HX_predicted
1206             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1207                 _Innovation = _Innovation - Cm @ Un
1208         #
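        # Extended Kalman update with the linearized observation operator Ht
        # and its adjoint Ha:
        #   Kn = Pf Ha (R + Ht Pf Ha)^{-1}    (gain)
        #   xa = xf + Kn d                    (analysis state)
        #   Pa = Pf - Kn Ht Pf                (analysis covariance)
        # with Pf = Pn_predicted and d = _Innovation.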
1209         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1210         Xn = Xn_predicted + Kn * _Innovation
1211         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1212         #
1213         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1214             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1215         #
1216         Xa = Xn # Pointeurs
1217         #--------------------------
1218         selfA._setInternalState("Xn", Xn)
1219         selfA._setInternalState("Pn", Pn)
1220         #--------------------------
1221         #
1222         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1223         # ---> avec analysis
1224         selfA.StoredVariables["Analysis"].store( Xa )
1225         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1226             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1227         if selfA._toStore("InnovationAtCurrentAnalysis"):
1228             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1229         # ---> avec current state
1230         if selfA._parameters["StoreInternalVariables"] \
1231             or selfA._toStore("CurrentState"):
1232             selfA.StoredVariables["CurrentState"].store( Xn )
1233         if selfA._toStore("ForecastState"):
1234             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1235         if selfA._toStore("ForecastCovariance"):
1236             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1237         if selfA._toStore("BMA"):
1238             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1239         if selfA._toStore("InnovationAtCurrentState"):
1240             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1241         if selfA._toStore("SimulatedObservationAtCurrentState") \
1242             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1243             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1244         # ---> autres
1245         if selfA._parameters["StoreInternalVariables"] \
1246             or selfA._toStore("CostFunctionJ") \
1247             or selfA._toStore("CostFunctionJb") \
1248             or selfA._toStore("CostFunctionJo") \
1249             or selfA._toStore("CurrentOptimum") \
1250             or selfA._toStore("APosterioriCovariance"):
1251             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1252             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1253             J   = Jb + Jo
1254             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1255             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1256             selfA.StoredVariables["CostFunctionJ" ].store( J )
1257             #
1258             if selfA._toStore("IndexOfOptimum") \
1259                 or selfA._toStore("CurrentOptimum") \
1260                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1261                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1262                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1263                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1264                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1265             if selfA._toStore("IndexOfOptimum"):
1266                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1267             if selfA._toStore("CurrentOptimum"):
1268                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1269             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1270                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1271             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1272                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1273             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1274                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1275             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1276                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1277         if selfA._toStore("APosterioriCovariance"):
1278             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1279         if selfA._parameters["EstimationOf"] == "Parameters" \
1280             and J < previousJMinimum:
1281             previousJMinimum    = J
1282             XaMin               = Xa
1283             if selfA._toStore("APosterioriCovariance"):
1284                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1285     #
1286     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1287     # ----------------------------------------------------------------------
1288     if selfA._parameters["EstimationOf"] == "Parameters":
1289         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1290         selfA.StoredVariables["Analysis"].store( XaMin )
1291         if selfA._toStore("APosterioriCovariance"):
1292             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1293         if selfA._toStore("BMA"):
1294             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1295     #
1296     return 0
1297
1298 # ==============================================================================
1299 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
1300     """
1301     Ensemble Kalman Smoother (EnKS)
1302     """
1303     #
1304     # Opérateurs
1305     H = HO["Direct"].appliedControledFormTo
1306     #
1307     if selfA._parameters["EstimationOf"] == "State":
1308         M = EM["Direct"].appliedControledFormTo
1309     #
1310     if CM is not None and "Tangent" in CM and U is not None:
1311         Cm = CM["Tangent"].asMatrix(Xb)
1312     else:
1313         Cm = None
1314     #
1315     # Précalcul des inversions de B et R
1316     RIdemi = R.sqrtmI()
1317     #
1318     # Durée d'observation et tailles
1319     LagL = selfA._parameters["SmootherLagL"]
1320     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
1321         raise ValueError("Fixed-lag smoother requires a series of observations")
1322     if Y.stepnumber() < LagL:
1323         raise ValueError("Fixed-lag smoother requires a series of observations at least as long as the lag L")
1324     duration = Y.stepnumber()
1325     __p = numpy.cumprod(Y.shape())[-1]
1326     __n = Xb.size
1327     __m = selfA._parameters["NumberOfMembers"]
1328     #
1329     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1330         selfA.StoredVariables["Analysis"].store( Xb )
1331         if selfA._toStore("APosterioriCovariance"):
1332             if hasattr(B,"asfullmatrix"):
1333                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1334             else:
1335                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1336     #
1337     # Calcul direct initial (on privilégie la mémorisation au recalcul)
1338     __seed = numpy.random.get_state()
1339     selfB = copy.deepcopy(selfA)
1340     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1341     if VariantM == "EnKS16-KalmanFilterFormula":
1342         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1343     else:
1344         raise ValueError("VariantM has to be chosen in the authorized methods list.")
1345     if LagL > 0:
1346         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1347     else:
1348         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1349     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1350     #
1351     for step in range(LagL,duration-1):
1352         #
1353         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1354         sEL.append(None)
1355         #
1356         if hasattr(Y,"store"):
1357             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1358         else:
1359             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1360         #
1361         if U is not None:
1362             if hasattr(U,"store") and len(U)>1:
1363                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1364             elif hasattr(U,"store") and len(U)==1:
1365                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1366             else:
1367                 Un = numpy.ravel( U ).reshape((-1,1))
1368         else:
1369             Un = None
1370         #
1371         #--------------------------
1372         if VariantM == "EnKS16-KalmanFilterFormula":
1373             if selfA._parameters["EstimationOf"] == "State": # Forecast
1374                 EL = M( [(EL[:,i], Un) for i in range(__m)],
1375                     argsAsSerie = True,
1376                     returnSerieAsArrayMatrix = True )
1377                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1378                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1379                     argsAsSerie = True,
1380                     returnSerieAsArrayMatrix = True )
1381                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1382                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1383                     EZ = EZ + Cm @ Un
1384             elif selfA._parameters["EstimationOf"] == "Parameters":
1385                 # --- > Par principe, M = Id, Q = 0
1386                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1387                     argsAsSerie = True,
1388                     returnSerieAsArrayMatrix = True )
1389             #
1390             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1391             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1392             #
1393             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1394             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1395             delta = RIdemi @ ( Ynpu - vZm )
1396             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1397             vw    = mT @ mS.T @ delta
1398             #
1399             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1400             mU    = numpy.identity(__m)
1401             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1402             #
1403             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1404             EL    = vEm + EX @ wTU
1405             #
1406             sEL[LagL] = EL
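            # Fixed-lag smoothing: the analysis weights wTU computed at the
            # current time are also applied to the L previously stored
            # ensembles, so that past analyses are updated with the newly
            # assimilated observation.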
1407             for irl in range(LagL): # Lissage des L précédentes analysis
1408                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1409                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1410                 sEL[irl] = vEm + EX @ wTU
1411             #
1412             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
1413             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1414             if selfA._toStore("APosterioriCovariance"):
1415                 EXn = sEL[0]
1416             #
1417             for irl in range(LagL):
1418                 sEL[irl] = sEL[irl+1]
1419             sEL[LagL] = None
1420         #--------------------------
1421         else:
1422             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1423         #
1424         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1425         # ---> avec analysis
1426         selfA.StoredVariables["Analysis"].store( Xa )
1427         if selfA._toStore("APosterioriCovariance"):
1428             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1429     #
1430     # Stockage des dernières analyses incomplètement remises à jour
1431     for irl in range(LagL):
1432         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1433         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1434         selfA.StoredVariables["Analysis"].store( Xa )
1435     #
1436     return 0
1437
1438 # ==============================================================================
1439 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1440     VariantM="KalmanFilterFormula",
1441     Hybrid=None,
1442     ):
1443     """
1444     Ensemble-Transform EnKF
1445     """
1446     if selfA._parameters["EstimationOf"] == "Parameters":
1447         selfA._parameters["StoreInternalVariables"] = True
1448     #
1449     # Opérateurs
1450     H = HO["Direct"].appliedControledFormTo
1451     #
1452     if selfA._parameters["EstimationOf"] == "State":
1453         M = EM["Direct"].appliedControledFormTo
1454     #
1455     if CM is not None and "Tangent" in CM and U is not None:
1456         Cm = CM["Tangent"].asMatrix(Xb)
1457     else:
1458         Cm = None
1459     #
1460     # Durée d'observation et tailles
1461     if hasattr(Y,"stepnumber"):
1462         duration = Y.stepnumber()
1463         __p = numpy.cumprod(Y.shape())[-1]
1464     else:
1465         duration = 2
1466         __p = numpy.array(Y).size
1467     #
1468     # Précalcul des inversions de B et R
1469     if selfA._parameters["StoreInternalVariables"] \
1470         or selfA._toStore("CostFunctionJ") \
1471         or selfA._toStore("CostFunctionJb") \
1472         or selfA._toStore("CostFunctionJo") \
1473         or selfA._toStore("CurrentOptimum") \
1474         or selfA._toStore("APosterioriCovariance"):
1475         BI = B.getI()
1476         RI = R.getI()
1477     elif VariantM != "KalmanFilterFormula":
1478         RI = R.getI()
1479     if VariantM == "KalmanFilterFormula":
1480         RIdemi = R.sqrtmI()
1481     #
1482     __n = Xb.size
1483     __m = selfA._parameters["NumberOfMembers"]
1484     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1485     previousJMinimum = numpy.finfo(float).max
1486     #
1487     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1488         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1489         selfA.StoredVariables["Analysis"].store( Xb )
1490         if selfA._toStore("APosterioriCovariance"):
1491             if hasattr(B,"asfullmatrix"):
1492                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1493             else:
1494                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1495         selfA._setInternalState("seed", numpy.random.get_state())
1496     elif selfA._parameters["nextStep"]:
1497         Xn = selfA._getInternalState("Xn")
1498     #
1499     for step in range(duration-1):
1500         numpy.random.set_state(selfA._getInternalState("seed"))
1501         if hasattr(Y,"store"):
1502             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1503         else:
1504             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1505         #
1506         if U is not None:
1507             if hasattr(U,"store") and len(U)>1:
1508                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1509             elif hasattr(U,"store") and len(U)==1:
1510                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1511             else:
1512                 Un = numpy.ravel( U ).reshape((-1,1))
1513         else:
1514             Un = None
1515         #
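        # Optional multiplicative inflation of the background anomalies by
        # "InflationFactor" (see CovarianceInflation), commonly used to
        # compensate for the underestimation of the ensemble spread.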
1516         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1517             Xn = CovarianceInflation( Xn,
1518                 selfA._parameters["InflationType"],
1519                 selfA._parameters["InflationFactor"],
1520                 )
1521         #
1522         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1523             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1524                 argsAsSerie = True,
1525                 returnSerieAsArrayMatrix = True )
1526             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1527             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1528                 argsAsSerie = True,
1529                 returnSerieAsArrayMatrix = True )
1530             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1531                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1532                 Xn_predicted = Xn_predicted + Cm @ Un
1533         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1534             # --- > Par principe, M = Id, Q = 0
1535             Xn_predicted = EMX = Xn
1536             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1537                 argsAsSerie = True,
1538                 returnSerieAsArrayMatrix = True )
1539         #
1540         # Mean of forecast and observation of forecast
1541         Xfm  = EnsembleMean( Xn_predicted )
1542         Hfm  = EnsembleMean( HX_predicted )
1543         #
1544         # Anomalies
1545         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
1546         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
1547         #
1548         #--------------------------
1549         if VariantM == "KalmanFilterFormula":
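            # Deterministic ETKF square-root update in ensemble space:
            #   mS    = R^{-1/2} EaHX / sqrt(m-1)    (scaled observation anomalies)
            #   delta = R^{-1/2} (Ynpu - Hfm)        (scaled innovation)
            #   mT    = (I_m + mS^T mS)^{-1}, vw = mT mS^T delta
            # The analysis ensemble is the forecast mean plus the scaled state
            # anomalies transformed by (vw + sqrt(m-1) mT^{1/2}).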
1550             mS    = RIdemi * EaHX / math.sqrt(__m-1)
1551             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1552             delta = RIdemi * ( Ynpu - Hfm )
1553             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1554             vw    = mT @ mS.T @ delta
1555             #
1556             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1557             mU    = numpy.identity(__m)
1558             #
1559             EaX   = EaX / math.sqrt(__m-1)
1560             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1561         #--------------------------
1562         elif VariantM == "Variational":
1563             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
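            # Variational formulation: the analysis is obtained by minimizing,
            # in the ensemble-weight space w, the cost
            #   J(w) = 0.5 (d - EaHX w)^T R^{-1} (d - EaHX w) + 0.5 (m-1) w^T w
            # with a conjugate-gradient solver; the analysis anomalies are then
            # rebuilt from the square root of (m-1) times the inverse Hessian.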
1564             def CostFunction(w):
1565                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1566                 _Jo = 0.5 * _A.T @ (RI * _A)
1567                 _Jb = 0.5 * (__m-1) * w.T @ w
1568                 _J  = _Jo + _Jb
1569                 return float(_J)
1570             def GradientOfCostFunction(w):
1571                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1572                 _GradJo = - EaHX.T @ (RI * _A)
1573                 _GradJb = (__m-1) * w.reshape((__m,1))
1574                 _GradJ  = _GradJo + _GradJb
1575                 return numpy.ravel(_GradJ)
1576             vw = scipy.optimize.fmin_cg(
1577                 f           = CostFunction,
1578                 x0          = numpy.zeros(__m),
1579                 fprime      = GradientOfCostFunction,
1580                 args        = (),
1581                 disp        = False,
1582                 )
1583             #
1584             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1585             Htb = (__m-1) * numpy.identity(__m)
1586             Hta = Hto + Htb
1587             #
1588             Pta = numpy.linalg.inv( Hta )
1589             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1590             #
1591             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1592         #--------------------------
1593         elif VariantM == "FiniteSize11": # Jauge Boc2011
1594             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
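            # Finite-size variant: the Gaussian background term of the weight
            # cost function is replaced by the finite-size prior
            #   Jb(w) = 0.5 m log(1 + 1/m + w^T w)
            # which accounts for the sampling error of a finite ensemble
            # (gauge of the "Boc2011" reference named above).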
1595             def CostFunction(w):
1596                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1597                 _Jo = 0.5 * _A.T @ (RI * _A)
1598                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1599                 _J  = _Jo + _Jb
1600                 return float(_J)
1601             def GradientOfCostFunction(w):
1602                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1603                 _GradJo = - EaHX.T @ (RI * _A)
1604                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1605                 _GradJ  = _GradJo + _GradJb
1606                 return numpy.ravel(_GradJ)
1607             vw = scipy.optimize.fmin_cg(
1608                 f           = CostFunction,
1609                 x0          = numpy.zeros(__m),
1610                 fprime      = GradientOfCostFunction,
1611                 args        = (),
1612                 disp        = False,
1613                 )
1614             #
1615             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1616             Htb = __m * \
1617                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1618                 / (1 + 1/__m + vw.T @ vw)**2
1619             Hta = Hto + Htb
1620             #
1621             Pta = numpy.linalg.inv( Hta )
1622             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1623             #
1624             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1625         #--------------------------
1626         elif VariantM == "FiniteSize15": # Jauge Boc2015
1627             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1628             def CostFunction(w):
1629                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1630                 _Jo = 0.5 * _A.T @ (RI * _A)
1631                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1632                 _J  = _Jo + _Jb
1633                 return float(_J)
1634             def GradientOfCostFunction(w):
1635                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1636                 _GradJo = - EaHX.T @ (RI * _A)
1637                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1638                 _GradJ  = _GradJo + _GradJb
1639                 return numpy.ravel(_GradJ)
1640             vw = scipy.optimize.fmin_cg(
1641                 f           = CostFunction,
1642                 x0          = numpy.zeros(__m),
1643                 fprime      = GradientOfCostFunction,
1644                 args        = (),
1645                 disp        = False,
1646                 )
1647             #
1648             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1649             Htb = (__m+1) * \
1650                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1651                 / (1 + 1/__m + vw.T @ vw)**2
1652             Hta = Hto + Htb
1653             #
1654             Pta = numpy.linalg.inv( Hta )
1655             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1656             #
1657             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1658         #--------------------------
1659         elif VariantM == "FiniteSize16": # Jauge Boc2016
1660             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1661             def CostFunction(w):
1662                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1663                 _Jo = 0.5 * _A.T @ (RI * _A)
1664                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1665                 _J  = _Jo + _Jb
1666                 return float(_J)
1667             def GradientOfCostFunction(w):
1668                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1669                 _GradJo = - EaHX.T @ (RI * _A)
1670                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1671                 _GradJ  = _GradJo + _GradJb
1672                 return numpy.ravel(_GradJ)
1673             vw = scipy.optimize.fmin_cg(
1674                 f           = CostFunction,
1675                 x0          = numpy.zeros(__m),
1676                 fprime      = GradientOfCostFunction,
1677                 args        = (),
1678                 disp        = False,
1679                 )
1680             #
1681             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1682             Htb = ((__m+1) / (__m-1)) * \
1683                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1684                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1685             Hta = Hto + Htb
1686             #
1687             Pta = numpy.linalg.inv( Hta )
1688             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1689             #
1690             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1691         #--------------------------
1692         else:
1693             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1694         #
1695         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1696             Xn = CovarianceInflation( Xn,
1697                 selfA._parameters["InflationType"],
1698                 selfA._parameters["InflationFactor"],
1699                 )
1700         #
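        # Hybrid E3DVAR step: the analysis ensemble is recentred on a 3DVAR
        # analysis, the static and ensemble covariances being weighted by the
        # "HybridCovarianceEquilibrium" parameter betaf (see
        # Apply3DVarRecentringOnEnsemble).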
1701         if Hybrid == "E3DVAR":
1702             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1703             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1704         #
1705         Xa = EnsembleMean( Xn )
1706         #--------------------------
1707         selfA._setInternalState("Xn", Xn)
1708         selfA._setInternalState("seed", numpy.random.get_state())
1709         #--------------------------
1710         #
1711         if selfA._parameters["StoreInternalVariables"] \
1712             or selfA._toStore("CostFunctionJ") \
1713             or selfA._toStore("CostFunctionJb") \
1714             or selfA._toStore("CostFunctionJo") \
1715             or selfA._toStore("APosterioriCovariance") \
1716             or selfA._toStore("InnovationAtCurrentAnalysis") \
1717             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1718             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1719             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1720             _Innovation = Ynpu - _HXa
1721         #
1722         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1723         # ---> avec analysis
1724         selfA.StoredVariables["Analysis"].store( Xa )
1725         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1726             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1727         if selfA._toStore("InnovationAtCurrentAnalysis"):
1728             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1729         # ---> avec current state
1730         if selfA._parameters["StoreInternalVariables"] \
1731             or selfA._toStore("CurrentState"):
1732             selfA.StoredVariables["CurrentState"].store( Xn )
1733         if selfA._toStore("ForecastState"):
1734             selfA.StoredVariables["ForecastState"].store( EMX )
1735         if selfA._toStore("ForecastCovariance"):
1736             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1737         if selfA._toStore("BMA"):
1738             selfA.StoredVariables["BMA"].store( EMX - Xa )
1739         if selfA._toStore("InnovationAtCurrentState"):
1740             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1741         if selfA._toStore("SimulatedObservationAtCurrentState") \
1742             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1743             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1744         # ---> autres
1745         if selfA._parameters["StoreInternalVariables"] \
1746             or selfA._toStore("CostFunctionJ") \
1747             or selfA._toStore("CostFunctionJb") \
1748             or selfA._toStore("CostFunctionJo") \
1749             or selfA._toStore("CurrentOptimum") \
1750             or selfA._toStore("APosterioriCovariance"):
1751             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1752             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1753             J   = Jb + Jo
1754             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1755             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1756             selfA.StoredVariables["CostFunctionJ" ].store( J )
1757             #
1758             if selfA._toStore("IndexOfOptimum") \
1759                 or selfA._toStore("CurrentOptimum") \
1760                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1761                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1762                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1763                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1764                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1765             if selfA._toStore("IndexOfOptimum"):
1766                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1767             if selfA._toStore("CurrentOptimum"):
1768                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1769             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1770                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1771             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1772                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1773             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1774                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1775             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1776                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1777         if selfA._toStore("APosterioriCovariance"):
1778             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1779         if selfA._parameters["EstimationOf"] == "Parameters" \
1780             and J < previousJMinimum:
1781             previousJMinimum    = J
1782             XaMin               = Xa
1783             if selfA._toStore("APosterioriCovariance"):
1784                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1785         # ---> Pour les smoothers
1786         if selfA._toStore("CurrentEnsembleState"):
1787             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1788     #
1789     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1790     # ----------------------------------------------------------------------
1791     if selfA._parameters["EstimationOf"] == "Parameters":
1792         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1793         selfA.StoredVariables["Analysis"].store( XaMin )
1794         if selfA._toStore("APosterioriCovariance"):
1795             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1796         if selfA._toStore("BMA"):
1797             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1798     #
1799     return 0
1800
1801 # ==============================================================================
1802 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1803     """
1804     Extended Kalman Filter
1805     """
1806     if selfA._parameters["EstimationOf"] == "Parameters":
1807         selfA._parameters["StoreInternalVariables"] = True
1808     #
1809     # Opérateurs
1810     H = HO["Direct"].appliedControledFormTo
1811     #
1812     if selfA._parameters["EstimationOf"] == "State":
1813         M = EM["Direct"].appliedControledFormTo
1814     #
1815     if CM is not None and "Tangent" in CM and U is not None:
1816         Cm = CM["Tangent"].asMatrix(Xb)
1817     else:
1818         Cm = None
1819     #
1820     # Durée d'observation et tailles
1821     if hasattr(Y,"stepnumber"):
1822         duration = Y.stepnumber()
1823         __p = numpy.cumprod(Y.shape())[-1]
1824     else:
1825         duration = 2
1826         __p = numpy.array(Y).size
1827     #
1828     # Précalcul des inversions de B et R
1829     if selfA._parameters["StoreInternalVariables"] \
1830         or selfA._toStore("CostFunctionJ") \
1831         or selfA._toStore("CostFunctionJb") \
1832         or selfA._toStore("CostFunctionJo") \
1833         or selfA._toStore("CurrentOptimum") \
1834         or selfA._toStore("APosterioriCovariance"):
1835         BI = B.getI()
1836         RI = R.getI()
1837     #
1838     __n = Xb.size
1839     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
1840     #
1841     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1842         Xn = Xb
1843         Pn = B
1844         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1845         selfA.StoredVariables["Analysis"].store( Xb )
1846         if selfA._toStore("APosterioriCovariance"):
1847             if hasattr(B,"asfullmatrix"):
1848                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1849             else:
1850                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1851         selfA._setInternalState("seed", numpy.random.get_state())
1852     elif selfA._parameters["nextStep"]:
1853         Xn = selfA._getInternalState("Xn")
1854         Pn = selfA._getInternalState("Pn")
1855     #
1856     if selfA._parameters["EstimationOf"] == "Parameters":
1857         XaMin            = Xn
1858         previousJMinimum = numpy.finfo(float).max
1859     #
1860     for step in range(duration-1):
1861         if hasattr(Y,"store"):
1862             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1863         else:
1864             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1865         #
1866         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1867         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1868         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1869         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1870         #
1871         if selfA._parameters["EstimationOf"] == "State":
1872             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1873             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1874             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1875             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1876         #
1877         if U is not None:
1878             if hasattr(U,"store") and len(U)>1:
1879                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1880             elif hasattr(U,"store") and len(U)==1:
1881                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1882             else:
1883                 Un = numpy.ravel( U ).reshape((-1,1))
1884         else:
1885             Un = None
1886         #
1887         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1888             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1889             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1890                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1891                 Xn_predicted = Xn_predicted + Cm @ Un
1892             Pn_predicted = Q + Mt * (Pn * Ma)
1893         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1894             # --- > Par principe, M = Id, Q = 0
1895             Xn_predicted = Xn
1896             Pn_predicted = Pn
1897         #
1898         if selfA._parameters["EstimationOf"] == "State":
1899             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1900             _Innovation  = Ynpu - HX_predicted
1901         elif selfA._parameters["EstimationOf"] == "Parameters":
1902             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1903             _Innovation  = Ynpu - HX_predicted
1904             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1905                 _Innovation = _Innovation - Cm @ Un
1906         #
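        # Unconstrained extended Kalman update: same gain, state and
        # covariance formulas as in the constrained version cekf above,
        # without bounds projection.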
1907         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1908         Xn = Xn_predicted + Kn * _Innovation
1909         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1910         #
1911         Xa = Xn # Pointeurs
1912         #--------------------------
1913         selfA._setInternalState("Xn", Xn)
1914         selfA._setInternalState("Pn", Pn)
1915         #--------------------------
1916         #
1917         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1918         # ---> avec analysis
1919         selfA.StoredVariables["Analysis"].store( Xa )
1920         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1921             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1922         if selfA._toStore("InnovationAtCurrentAnalysis"):
1923             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1924         # ---> avec current state
1925         if selfA._parameters["StoreInternalVariables"] \
1926             or selfA._toStore("CurrentState"):
1927             selfA.StoredVariables["CurrentState"].store( Xn )
1928         if selfA._toStore("ForecastState"):
1929             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1930         if selfA._toStore("ForecastCovariance"):
1931             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1932         if selfA._toStore("BMA"):
1933             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1934         if selfA._toStore("InnovationAtCurrentState"):
1935             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1936         if selfA._toStore("SimulatedObservationAtCurrentState") \
1937             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1938             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1939         # ---> autres
1940         if selfA._parameters["StoreInternalVariables"] \
1941             or selfA._toStore("CostFunctionJ") \
1942             or selfA._toStore("CostFunctionJb") \
1943             or selfA._toStore("CostFunctionJo") \
1944             or selfA._toStore("CurrentOptimum") \
1945             or selfA._toStore("APosterioriCovariance"):
1946             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1947             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1948             J   = Jb + Jo
1949             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1950             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1951             selfA.StoredVariables["CostFunctionJ" ].store( J )
1952             #
1953             if selfA._toStore("IndexOfOptimum") \
1954                 or selfA._toStore("CurrentOptimum") \
1955                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1956                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1957                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1958                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1959                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1960             if selfA._toStore("IndexOfOptimum"):
1961                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1962             if selfA._toStore("CurrentOptimum"):
1963                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1964             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1965                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1966             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1967                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1968             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1969                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1970             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1971                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1972         if selfA._toStore("APosterioriCovariance"):
1973             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1974         if selfA._parameters["EstimationOf"] == "Parameters" \
1975             and J < previousJMinimum:
1976             previousJMinimum    = J
1977             XaMin               = Xa
1978             if selfA._toStore("APosterioriCovariance"):
1979                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1980     #
1981     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1982     # ----------------------------------------------------------------------
1983     if selfA._parameters["EstimationOf"] == "Parameters":
1984         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1985         selfA.StoredVariables["Analysis"].store( XaMin )
1986         if selfA._toStore("APosterioriCovariance"):
1987             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1988         if selfA._toStore("BMA"):
1989             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1990     #
1991     return 0
1992
1993 # ==============================================================================
1994 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1995     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1996     """
1997     Iterative EnKF
1998     """
1999     if selfA._parameters["EstimationOf"] == "Parameters":
2000         selfA._parameters["StoreInternalVariables"] = True
2001     #
2002     # Opérateurs
2003     H = HO["Direct"].appliedControledFormTo
2004     #
2005     if selfA._parameters["EstimationOf"] == "State":
2006         M = EM["Direct"].appliedControledFormTo
2007     #
2008     if CM is not None and "Tangent" in CM and U is not None:
2009         Cm = CM["Tangent"].asMatrix(Xb)
2010     else:
2011         Cm = None
2012     #
2013     # Durée d'observation et tailles
2014     if hasattr(Y,"stepnumber"):
2015         duration = Y.stepnumber()
2016         __p = numpy.cumprod(Y.shape())[-1]
2017     else:
2018         duration = 2
2019         __p = numpy.array(Y).size
2020     #
2021     # Précalcul des inversions de B et R
2022     if selfA._parameters["StoreInternalVariables"] \
2023         or selfA._toStore("CostFunctionJ") \
2024         or selfA._toStore("CostFunctionJb") \
2025         or selfA._toStore("CostFunctionJo") \
2026         or selfA._toStore("CurrentOptimum") \
2027         or selfA._toStore("APosterioriCovariance"):
2028         BI = B.getI()
2029     RI = R.getI()
2030     #
2031     __n = Xb.size
2032     __m = selfA._parameters["NumberOfMembers"]
2033     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
2034     previousJMinimum = numpy.finfo(float).max
2035     #
2036     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2037         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2038         else:                         Pn = B
2039         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2040         selfA.StoredVariables["Analysis"].store( Xb )
2041         if selfA._toStore("APosterioriCovariance"):
2042             if hasattr(B,"asfullmatrix"):
2043                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2044             else:
2045                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2046         selfA._setInternalState("seed", numpy.random.get_state())
2047     elif selfA._parameters["nextStep"]:
2048         Xn = selfA._getInternalState("Xn")
2049     #
2050     for step in range(duration-1):
2051         numpy.random.set_state(selfA._getInternalState("seed"))
2052         if hasattr(Y,"store"):
2053             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2054         else:
2055             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2056         #
2057         if U is not None:
2058             if hasattr(U,"store") and len(U)>1:
2059                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2060             elif hasattr(U,"store") and len(U)==1:
2061                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2062             else:
2063                 Un = numpy.ravel( U ).reshape((-1,1))
2064         else:
2065             Un = None
2066         #
2067         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2068             Xn = CovarianceInflation( Xn,
2069                 selfA._parameters["InflationType"],
2070                 selfA._parameters["InflationFactor"],
2071                 )
2072         #
2073         #--------------------------
2074         if VariantM == "IEnKF12":
2075             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2076             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2077             __j = 0
2078             Deltaw = 1
2079             if not BnotT:
2080                 Ta  = numpy.identity(__m)
2081             vw  = numpy.zeros(__m)
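            # Gauss-Newton iterations in the ensemble-weight space w: at each
            # step the gradient is GradJ = w - EaY^T R^{-1} (Ynpu - H(x(w)))
            # and the approximate Hessian mH = I_m + EaY^T R^{-1} EaY; the
            # loop stops when the increment Deltaw is smaller than _e or
            # after _jmax iterations.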
2082             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2083                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2084                 #
2085                 if BnotT:
2086                     E1 = vx1 + _epsilon * EaX
2087                 else:
2088                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2089                 #
2090                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2091                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2092                         argsAsSerie = True,
2093                         returnSerieAsArrayMatrix = True )
2094                 elif selfA._parameters["EstimationOf"] == "Parameters":
2095                     # --- > Par principe, M = Id
2096                     E2 = Xn
2097                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2098                 vy1 = H((vx2, Un)).reshape((__p,1))
2099                 #
2100                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2101                     argsAsSerie = True,
2102                     returnSerieAsArrayMatrix = True )
2103                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2104                 #
2105                 if BnotT:
2106                     EaY = (HE2 - vy2) / _epsilon
2107                 else:
2108                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2109                 #
2110                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2111                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2112                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2113                 #
2114                 vw = vw + Deltaw
2115                 #
2116                 if not BnotT:
2117                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2118                 #
2119                 __j = __j + 1
2120             #
2121             A2 = EnsembleOfAnomalies( E2 )
2122             #
2123             if BnotT:
2124                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2125                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2126             #
2127             Xn = vx2 + A2
2128         #--------------------------
2129         else:
2130             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2131         #
2132         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2133             Xn = CovarianceInflation( Xn,
2134                 selfA._parameters["InflationType"],
2135                 selfA._parameters["InflationFactor"],
2136                 )
2137         #
2138         Xa = EnsembleMean( Xn )
2139         #--------------------------
2140         selfA._setInternalState("Xn", Xn)
2141         selfA._setInternalState("seed", numpy.random.get_state())
2142         #--------------------------
2143         #
2144         if selfA._parameters["StoreInternalVariables"] \
2145             or selfA._toStore("CostFunctionJ") \
2146             or selfA._toStore("CostFunctionJb") \
2147             or selfA._toStore("CostFunctionJo") \
2148             or selfA._toStore("APosterioriCovariance") \
2149             or selfA._toStore("InnovationAtCurrentAnalysis") \
2150             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2151             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2152             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2153             _Innovation = Ynpu - _HXa
2154         #
2155         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2156         # ---> with analysis
2157         selfA.StoredVariables["Analysis"].store( Xa )
2158         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2159             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2160         if selfA._toStore("InnovationAtCurrentAnalysis"):
2161             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2162         # ---> with current state
2163         if selfA._parameters["StoreInternalVariables"] \
2164             or selfA._toStore("CurrentState"):
2165             selfA.StoredVariables["CurrentState"].store( Xn )
2166         if selfA._toStore("ForecastState"):
2167             selfA.StoredVariables["ForecastState"].store( E2 )
2168         if selfA._toStore("ForecastCovariance"):
2169             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2170         if selfA._toStore("BMA"):
2171             selfA.StoredVariables["BMA"].store( E2 - Xa )
2172         if selfA._toStore("InnovationAtCurrentState"):
2173             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2174         if selfA._toStore("SimulatedObservationAtCurrentState") \
2175             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2176             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2177         # ---> others
2178         if selfA._parameters["StoreInternalVariables"] \
2179             or selfA._toStore("CostFunctionJ") \
2180             or selfA._toStore("CostFunctionJb") \
2181             or selfA._toStore("CostFunctionJo") \
2182             or selfA._toStore("CurrentOptimum") \
2183             or selfA._toStore("APosterioriCovariance"):
2184             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2185             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2186             J   = Jb + Jo
2187             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2188             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2189             selfA.StoredVariables["CostFunctionJ" ].store( J )
2190             #
2191             if selfA._toStore("IndexOfOptimum") \
2192                 or selfA._toStore("CurrentOptimum") \
2193                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2194                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2195                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2196                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2197                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2198             if selfA._toStore("IndexOfOptimum"):
2199                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2200             if selfA._toStore("CurrentOptimum"):
2201                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2202             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2203                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2204             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2205                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2206             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2207                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2208             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2209                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2210         if selfA._toStore("APosterioriCovariance"):
2211             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2212         if selfA._parameters["EstimationOf"] == "Parameters" \
2213             and J < previousJMinimum:
2214             previousJMinimum    = J
2215             XaMin               = Xa
2216             if selfA._toStore("APosterioriCovariance"):
2217                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2218         # ---> For the smoothers
2219         if selfA._toStore("CurrentEnsembleState"):
2220             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2221     #
2222     # Additional final storage of the optimum in parameter estimation
2223     # ----------------------------------------------------------------------
2224     if selfA._parameters["EstimationOf"] == "Parameters":
2225         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2226         selfA.StoredVariables["Analysis"].store( XaMin )
2227         if selfA._toStore("APosterioriCovariance"):
2228             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2229         if selfA._toStore("BMA"):
2230             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2231     #
2232     return 0
2233
2234 # ==============================================================================
2235 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2236     """
2237     Incremental 3DVAR
2238     """
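    # Principle (as implemented below): an outer loop re-linearizes the observation
    # operator around the current iterate Xr, and an inner loop minimizes the
    # resulting quadratic cost in the increment dx, Xr being updated at each outer step.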
2239     #
2240     # Initialisations
2241     # ---------------
2242     Hm = HO["Direct"].appliedTo
2243     #
2244     BI = B.getI()
2245     RI = R.getI()
2246     #
2247     HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
2248     Innovation = Y - HXb
2249     #
2250     # Outer Loop
2251     # ----------
2252     iOuter = 0
2253     J      = 1./mpr
2254     DeltaJ = 1./mpr
2255     Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
2256     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2257         #
2258         # Inner Loop
2259         # ----------
2260         Ht = HO["Tangent"].asMatrix(Xr)
2261         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2262         #
2263         # Definition of the cost function
2264         # ------------------------------
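        # Quadratic inner-loop cost, as coded below:
        #   J(dx) = 0.5 * dx^T B^{-1} dx + 0.5 * (d - Ht dx)^T R^{-1} (d - Ht dx)
        # with d = Y - H(Xb) the background innovation and Ht the tangent at Xr.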
2265         def CostFunction(dx):
2266             _dX  = numpy.asarray(dx).reshape((-1,1))
2267             if selfA._parameters["StoreInternalVariables"] or \
2268                 selfA._toStore("CurrentState") or \
2269                 selfA._toStore("CurrentOptimum"):
2270                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2271             _HdX = (Ht @ _dX).reshape((-1,1))
2272             _dInnovation = Innovation - _HdX
2273             if selfA._toStore("SimulatedObservationAtCurrentState") or \
2274                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2276             if selfA._toStore("InnovationAtCurrentState"):
2277                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2278             #
2279             Jb  = float( 0.5 * _dX.T * (BI * _dX) )
2280             Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2281             J   = Jb + Jo
2282             #
2283             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2284             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2285             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2286             selfA.StoredVariables["CostFunctionJ" ].store( J )
2287             if selfA._toStore("IndexOfOptimum") or \
2288                 selfA._toStore("CurrentOptimum") or \
2289                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2290                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2291                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2292                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2293                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2294             if selfA._toStore("IndexOfOptimum"):
2295                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2296             if selfA._toStore("CurrentOptimum"):
2297                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2298             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2299                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2300             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2301                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2302             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2303                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2304             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2305                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2306             return J
2307         #
2308         def GradientOfCostFunction(dx):
2309             _dX          = numpy.ravel( dx )
2310             _HdX         = (Ht @ _dX).reshape((-1,1))
2311             _dInnovation = Innovation - _HdX
2312             GradJb       = BI @ _dX
2313             GradJo       = - Ht.T @ (RI * _dInnovation)
2314             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2315             return GradJ
2316         #
2317         # Minimization of the cost functional
2318         # --------------------------------
2319         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2320         #
2321         if selfA._parameters["Minimizer"] == "LBFGSB":
2322             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2323             if "0.19" <= scipy.version.version <= "1.1.0":
2324                 import lbfgsbhlt as optimiseur
2325             else:
2326                 import scipy.optimize as optimiseur
2327             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2328                 func        = CostFunction,
2329                 x0          = numpy.zeros(Xb.size),
2330                 fprime      = GradientOfCostFunction,
2331                 args        = (),
2332                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2333                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2334                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2335                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2336                 iprint      = selfA._parameters["optiprint"],
2337                 )
2338             nfeval = Informations['funcalls']
2339             rc     = Informations['warnflag']
2340         elif selfA._parameters["Minimizer"] == "TNC":
2341             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2342                 func        = CostFunction,
2343                 x0          = numpy.zeros(Xb.size),
2344                 fprime      = GradientOfCostFunction,
2345                 args        = (),
2346                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2347                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2348                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2349                 ftol        = selfA._parameters["CostDecrementTolerance"],
2350                 messages    = selfA._parameters["optmessages"],
2351                 )
2352         elif selfA._parameters["Minimizer"] == "CG":
2353             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2354                 f           = CostFunction,
2355                 x0          = numpy.zeros(Xb.size),
2356                 fprime      = GradientOfCostFunction,
2357                 args        = (),
2358                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2359                 gtol        = selfA._parameters["GradientNormTolerance"],
2360                 disp        = selfA._parameters["optdisp"],
2361                 full_output = True,
2362                 )
2363         elif selfA._parameters["Minimizer"] == "NCG":
2364             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2365                 f           = CostFunction,
2366                 x0          = numpy.zeros(Xb.size),
2367                 fprime      = GradientOfCostFunction,
2368                 args        = (),
2369                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2370                 avextol     = selfA._parameters["CostDecrementTolerance"],
2371                 disp        = selfA._parameters["optdisp"],
2372                 full_output = True,
2373                 )
2374         elif selfA._parameters["Minimizer"] == "BFGS":
2375             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2376                 f           = CostFunction,
2377                 x0          = numpy.zeros(Xb.size),
2378                 fprime      = GradientOfCostFunction,
2379                 args        = (),
2380                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2381                 gtol        = selfA._parameters["GradientNormTolerance"],
2382                 disp        = selfA._parameters["optdisp"],
2383                 full_output = True,
2384                 )
2385         else:
2386             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2387         #
2388         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2389         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2390         #
2391         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2392             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2393         else:
2394             Minimum = Xb + Minimum.reshape((-1,1))
2395         #
2396         Xr     = Minimum
2397         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J ; J = selfA.StoredVariables["CostFunctionJ" ][-1]
2398         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2399     #
2400     Xa = Xr
2401     #--------------------------
2402     #
2403     selfA.StoredVariables["Analysis"].store( Xa )
2404     #
2405     if selfA._toStore("OMA") or \
2406         selfA._toStore("SigmaObs2") or \
2407         selfA._toStore("SimulationQuantiles") or \
2408         selfA._toStore("SimulatedObservationAtOptimum"):
2409         if selfA._toStore("SimulatedObservationAtCurrentState"):
2410             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2411         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2412             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2413         else:
2414             HXa = Hm( Xa )
2415     #
2416     if selfA._toStore("APosterioriCovariance") or \
2417         selfA._toStore("SimulationQuantiles") or \
2418         selfA._toStore("JacobianMatrixAtOptimum") or \
2419         selfA._toStore("KalmanGainAtOptimum"):
2420         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2421         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2422     if selfA._toStore("APosterioriCovariance") or \
2423         selfA._toStore("SimulationQuantiles") or \
2424         selfA._toStore("KalmanGainAtOptimum"):
2425         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2426         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2427     if selfA._toStore("APosterioriCovariance") or \
2428         selfA._toStore("SimulationQuantiles"):
2429         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2430     if selfA._toStore("APosterioriCovariance"):
2431         selfA.StoredVariables["APosterioriCovariance"].store( A )
2432     if selfA._toStore("JacobianMatrixAtOptimum"):
2433         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2434     if selfA._toStore("KalmanGainAtOptimum"):
2435         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2436         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2437         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2438     #
2439     # Additional computations and/or storage
2440     # ---------------------------------------
2441     if selfA._toStore("Innovation") or \
2442         selfA._toStore("SigmaObs2") or \
2443         selfA._toStore("MahalanobisConsistency") or \
2444         selfA._toStore("OMB"):
2445         d  = Y - HXb
2446     if selfA._toStore("Innovation"):
2447         selfA.StoredVariables["Innovation"].store( d )
2448     if selfA._toStore("BMA"):
2449         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2450     if selfA._toStore("OMA"):
2451         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2452     if selfA._toStore("OMB"):
2453         selfA.StoredVariables["OMB"].store( d )
2454     if selfA._toStore("SigmaObs2"):
2455         TraceR = R.trace(Y.size)
2456         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2457     if selfA._toStore("MahalanobisConsistency"):
2458         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2459     if selfA._toStore("SimulationQuantiles"):
2460         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2461     if selfA._toStore("SimulatedObservationAtBackground"):
2462         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2463     if selfA._toStore("SimulatedObservationAtOptimum"):
2464         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2465     #
2466     return 0
2467
2468 # ==============================================================================
2469 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2470     VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2471     Hybrid=None,
2472     ):
2473     """
2474     Maximum Likelihood Ensemble Filter
2475     """
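    # Principle (as implemented below): at each observation time, the analysis is
    # obtained by an iterative minimization in the ensemble coefficient space w,
    # the state being parameterized as x = Xfm + EaX @ w around the forecast mean.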
2476     if selfA._parameters["EstimationOf"] == "Parameters":
2477         selfA._parameters["StoreInternalVariables"] = True
2478     #
2479     # Operators
2480     H = HO["Direct"].appliedControledFormTo
2481     #
2482     if selfA._parameters["EstimationOf"] == "State":
2483         M = EM["Direct"].appliedControledFormTo
2484     #
2485     if CM is not None and "Tangent" in CM and U is not None:
2486         Cm = CM["Tangent"].asMatrix(Xb)
2487     else:
2488         Cm = None
2489     #
2490     # Observation duration and sizes
2491     if hasattr(Y,"stepnumber"):
2492         duration = Y.stepnumber()
2493         __p = numpy.cumprod(Y.shape())[-1]
2494     else:
2495         duration = 2
2496         __p = numpy.array(Y).size
2497     #
2498     # Precompute the inverses of B and R
2499     if selfA._parameters["StoreInternalVariables"] \
2500         or selfA._toStore("CostFunctionJ") \
2501         or selfA._toStore("CostFunctionJb") \
2502         or selfA._toStore("CostFunctionJo") \
2503         or selfA._toStore("CurrentOptimum") \
2504         or selfA._toStore("APosterioriCovariance"):
2505         BI = B.getI()
2506     RI = R.getI()
2507     #
2508     __n = Xb.size
2509     __m = selfA._parameters["NumberOfMembers"]
2510     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
2511     previousJMinimum = numpy.finfo(float).max
2512     #
2513     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2514         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2515         selfA.StoredVariables["Analysis"].store( Xb )
2516         if selfA._toStore("APosterioriCovariance"):
2517             if hasattr(B,"asfullmatrix"):
2518                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2519             else:
2520                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2521         selfA._setInternalState("seed", numpy.random.get_state())
2522     elif selfA._parameters["nextStep"]:
2523         Xn = selfA._getInternalState("Xn")
2524     #
2525     for step in range(duration-1):
2526         numpy.random.set_state(selfA._getInternalState("seed"))
2527         if hasattr(Y,"store"):
2528             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2529         else:
2530             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2531         #
2532         if U is not None:
2533             if hasattr(U,"store") and len(U)>1:
2534                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2535             elif hasattr(U,"store") and len(U)==1:
2536                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2537             else:
2538                 Un = numpy.ravel( U ).reshape((-1,1))
2539         else:
2540             Un = None
2541         #
2542         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2543             Xn = CovarianceInflation( Xn,
2544                 selfA._parameters["InflationType"],
2545                 selfA._parameters["InflationFactor"],
2546                 )
2547         #
2548         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2549             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2550                 argsAsSerie = True,
2551                 returnSerieAsArrayMatrix = True )
2552             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2553             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
2554                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2555                 Xn_predicted = Xn_predicted + Cm @ Un
2556         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2557             # --- > By principle, M = Id, Q = 0
2558             Xn_predicted = EMX = Xn
2559         #
2560         #--------------------------
2561         if VariantM == "MLEF13":
2562             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2563             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2564             Ua  = numpy.identity(__m)
2565             __j = 0
2566             Deltaw = 1
2567             if not BnotT:
2568                 Ta  = numpy.identity(__m)
2569             vw  = numpy.zeros(__m)
2570             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2571                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2572                 #
2573                 if BnotT:
2574                     E1 = vx1 + _epsilon * EaX
2575                 else:
2576                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2577                 #
2578                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2579                     argsAsSerie = True,
2580                     returnSerieAsArrayMatrix = True )
2581                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2582                 #
2583                 if BnotT:
2584                     EaY = (HE2 - vy2) / _epsilon
2585                 else:
2586                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2587                 #
2588                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2589                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2590                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2591                 #
2592                 vw = vw + Deltaw
2593                 #
2594                 if not BnotT:
2595                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2596                 #
2597                 __j = __j + 1
2598             #
2599             if BnotT:
2600                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2601             #
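            # The analysis ensemble is rebuilt around the converged state vx1, with
            # the forecast anomalies rescaled by Ta (Ua is kept equal to the identity here).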
2602             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2603         #--------------------------
2604         else:
2605             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2606         #
2607         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2608             Xn = CovarianceInflation( Xn,
2609                 selfA._parameters["InflationType"],
2610                 selfA._parameters["InflationFactor"],
2611                 )
2612         #
2613         if Hybrid == "E3DVAR":
2614             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2615             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2616         #
2617         Xa = EnsembleMean( Xn )
2618         #--------------------------
2619         selfA._setInternalState("Xn", Xn)
2620         selfA._setInternalState("seed", numpy.random.get_state())
2621         #--------------------------
2622         #
2623         if selfA._parameters["StoreInternalVariables"] \
2624             or selfA._toStore("CostFunctionJ") \
2625             or selfA._toStore("CostFunctionJb") \
2626             or selfA._toStore("CostFunctionJo") \
2627             or selfA._toStore("APosterioriCovariance") \
2628             or selfA._toStore("InnovationAtCurrentAnalysis") \
2629             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2630             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2631             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2632             _Innovation = Ynpu - _HXa
2633         #
2634         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2635         # ---> with analysis
2636         selfA.StoredVariables["Analysis"].store( Xa )
2637         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2638             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2639         if selfA._toStore("InnovationAtCurrentAnalysis"):
2640             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2641         # ---> with current state
2642         if selfA._parameters["StoreInternalVariables"] \
2643             or selfA._toStore("CurrentState"):
2644             selfA.StoredVariables["CurrentState"].store( Xn )
2645         if selfA._toStore("ForecastState"):
2646             selfA.StoredVariables["ForecastState"].store( EMX )
2647         if selfA._toStore("ForecastCovariance"):
2648             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2649         if selfA._toStore("BMA"):
2650             selfA.StoredVariables["BMA"].store( EMX - Xa )
2651         if selfA._toStore("InnovationAtCurrentState"):
2652             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2653         if selfA._toStore("SimulatedObservationAtCurrentState") \
2654             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2655             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2656         # ---> others
2657         if selfA._parameters["StoreInternalVariables"] \
2658             or selfA._toStore("CostFunctionJ") \
2659             or selfA._toStore("CostFunctionJb") \
2660             or selfA._toStore("CostFunctionJo") \
2661             or selfA._toStore("CurrentOptimum") \
2662             or selfA._toStore("APosterioriCovariance"):
2663             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2664             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2665             J   = Jb + Jo
2666             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2667             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2668             selfA.StoredVariables["CostFunctionJ" ].store( J )
2669             #
2670             if selfA._toStore("IndexOfOptimum") \
2671                 or selfA._toStore("CurrentOptimum") \
2672                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2673                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2674                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2675                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2676                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2677             if selfA._toStore("IndexOfOptimum"):
2678                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2679             if selfA._toStore("CurrentOptimum"):
2680                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2681             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2682                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2683             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2684                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2685             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2686                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2687             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2688                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2689         if selfA._toStore("APosterioriCovariance"):
2690             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2691         if selfA._parameters["EstimationOf"] == "Parameters" \
2692             and J < previousJMinimum:
2693             previousJMinimum    = J
2694             XaMin               = Xa
2695             if selfA._toStore("APosterioriCovariance"):
2696                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2697         # ---> For the smoothers
2698         if selfA._toStore("CurrentEnsembleState"):
2699             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2700     #
2701     # Additional final storage of the optimum in parameter estimation
2702     # ----------------------------------------------------------------------
2703     if selfA._parameters["EstimationOf"] == "Parameters":
2704         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2705         selfA.StoredVariables["Analysis"].store( XaMin )
2706         if selfA._toStore("APosterioriCovariance"):
2707             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2708         if selfA._toStore("BMA"):
2709             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2710     #
2711     return 0
2712
2713 # ==============================================================================
2714 def mmqr(
2715         func     = None,
2716         x0       = None,
2717         fprime   = None,
2718         bounds   = None,
2719         quantile = 0.5,
2720         maxfun   = 15000,
2721         toler    = 1.e-06,
2722         y        = None,
2723         ):
2724     """
2725     Computer implementation of the MMQR algorithm, based on the publication:
2726     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2727     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2728     """
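    # Principle (as implemented below): at each MM iteration the quantile loss is
    # majorized by a quadratic surrogate with weights 1/(epsilon+|residual|), which
    # is decreased by a weighted least-squares step on the linearized model.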
2729     #
2730     # Retrieval of the initial data and information
2731     # --------------------------------------------------
2732     variables = numpy.ravel( x0 )
2733     mesures   = numpy.ravel( y )
2734     increment = sys.float_info[0]
2735     p         = variables.size
2736     n         = mesures.size
2737     quantile  = float(quantile)
2738     #
2739     # Computation of the MM parameters
2740     # ---------------------------
2741     tn      = float(toler) / n
2742     e0      = -tn / math.log(tn)
2743     epsilon = (e0-tn)/(1+math.log(e0))
2744     #
2745     # Initialization computations
2746     # ------------------------
2747     residus  = mesures - numpy.ravel( func( variables ) )
2748     poids    = 1./(epsilon+numpy.abs(residus))
2749     veps     = 1. - 2. * quantile - residus * poids
2750     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2751     iteration = 0
2752     #
2753     # Iterative search
2754     # -------------------
2755     while (increment > toler) and (iteration < maxfun) :
2756         iteration += 1
2757         #
2758         Derivees  = numpy.array(fprime(variables))
2759         Derivees  = Derivees.reshape(n,p) # ADAO & check shape
2760         DeriveesT = Derivees.transpose()
2761         M         =   numpy.dot( DeriveesT , (poids.reshape((-1,1)) * Derivees) )
2762         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2763         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2764         #
2765         variables = variables + step
2766         if bounds is not None:
2767             # Warning: avoid an infinite loop if an interval is too small
2768             while( (variables < numpy.ravel(numpy.asarray(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asarray(bounds)[:,1])).any() ):
2769                 step      = step/2.
2770                 variables = variables - step
2771         residus   = mesures - numpy.ravel( func(variables) )
2772         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2773         #
2774         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2775             step      = step/2.
2776             variables = variables - step
2777             residus   = mesures - numpy.ravel( func(variables) )
2778             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2779         #
2780         increment     = lastsurrogate-surrogate
2781         poids         = 1./(epsilon+numpy.abs(residus))
2782         veps          = 1. - 2. * quantile - residus * poids
2783         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2784     #
2785     # Discrepancy measure
2786     # --------------
2787     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2788     #
2789     return variables, Ecart, [n,p,iteration,increment,0]
2790
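# Illustrative call sketch for mmqr (the names "model", "modelprime", "x0" and "yobs"
# are hypothetical placeholders, not part of the module):
#   xq, gap, infos = mmqr(func=model, x0=x0, fprime=modelprime, quantile=0.75, y=yobs)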
2791 # ==============================================================================
2792 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2793     """
2794     Multi-step and multi-method 3DVAR
2795     """
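    # Principle (as implemented below): at each observation time, the previous
    # analysis is propagated by the evolution model M in state estimation (kept
    # unchanged in parameter estimation), then the provided "oneCycle" 3DVAR is
    # applied with this forecast as background.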
2796     #
2797     # Initialisation
2798     # --------------
2799     if selfA._parameters["EstimationOf"] == "State":
2800         M = EM["Direct"].appliedControledFormTo
2801         if CM is not None and "Tangent" in CM and U is not None:
2802             Cm = CM["Tangent"].asMatrix(Xb)
2803         else:
2804             Cm = None
2805         #
2806         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2807             Xn = numpy.ravel(Xb).reshape((-1,1))
2808             selfA.StoredVariables["Analysis"].store( Xn )
2809             if selfA._toStore("APosterioriCovariance"):
2810                 if hasattr(B,"asfullmatrix"):
2811                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2812                 else:
2813                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2814             if selfA._toStore("ForecastState"):
2815                 selfA.StoredVariables["ForecastState"].store( Xn )
2816         elif selfA._parameters["nextStep"]:
2817             Xn = selfA._getInternalState("Xn")
2818     else:
2819         Xn = numpy.ravel(Xb).reshape((-1,1))
2820     #
2821     if hasattr(Y,"stepnumber"):
2822         duration = Y.stepnumber()
2823     else:
2824         duration = 2
2825     #
2826     # Multi-step
2827     for step in range(duration-1):
2828         if hasattr(Y,"store"):
2829             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2830         else:
2831             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2832         #
2833         if U is not None:
2834             if hasattr(U,"store") and len(U)>1:
2835                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2836             elif hasattr(U,"store") and len(U)==1:
2837                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2838             else:
2839                 Un = numpy.ravel( U ).reshape((-1,1))
2840         else:
2841             Un = None
2842         #
2843         if selfA._parameters["EstimationOf"] == "State": # Forecast
2844             Xn_predicted = M( (Xn, Un) )
2845             if selfA._toStore("ForecastState"):
2846                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2847             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
2848                 Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
2849                 Xn_predicted = Xn_predicted + Cm @ Un
2850         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2851             # --- > By principle, M = Id, Q = 0
2852             Xn_predicted = Xn
2853         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2854         #
2855         oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
2856         #
2857         Xn = selfA.StoredVariables["Analysis"][-1]
2858         #--------------------------
2859         selfA._setInternalState("Xn", Xn)
2860     #
2861     return 0
2862
2863 # ==============================================================================
2864 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2865     """
2866     3DVAR PSAS
2867     """
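    # Principle (as implemented below): the minimization is carried out in the
    # observation (dual) space on w, with J(w) = 0.5 * w^T (H B H^T + R) w - w^T d,
    # and the analysis is recovered in state space as Xa = Xb + B H^T w.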
2868     #
2869     # Initialisations
2870     # ---------------
2871     Hm = HO["Direct"].appliedTo
2872     #
2873     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2874         HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
2875     else:
2876         HXb = numpy.asarray(Hm( Xb ))
2877     HXb = numpy.ravel( HXb ).reshape((-1,1))
2878     if Y.size != HXb.size:
2879         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2880     if max(Y.shape) != max(HXb.shape):
2881         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2882     #
2883     if selfA._toStore("JacobianMatrixAtBackground"):
2884         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2885         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2886         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2887     #
2888     Ht = HO["Tangent"].asMatrix(Xb)
2889     BHT = B * Ht.T
2890     HBHTpR = R + Ht * BHT
2891     Innovation = Y - HXb
2892     #
2893     Xini = numpy.zeros(Y.size)
2894     #
2895     # Definition of the cost function
2896     # ------------------------------
2897     def CostFunction(w):
2898         _W = numpy.asarray(w).reshape((-1,1))
2899         if selfA._parameters["StoreInternalVariables"] or \
2900             selfA._toStore("CurrentState") or \
2901             selfA._toStore("CurrentOptimum"):
2902             selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2903         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2904             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2905             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2906         if selfA._toStore("InnovationAtCurrentState"):
2907             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2908         #
2909         Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2910         Jo  = float( - _W.T @ Innovation )
2911         J   = Jb + Jo
2912         #
2913         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2914         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2915         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2916         selfA.StoredVariables["CostFunctionJ" ].store( J )
2917         if selfA._toStore("IndexOfOptimum") or \
2918             selfA._toStore("CurrentOptimum") or \
2919             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2920             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2921             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2922             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2923             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2924         if selfA._toStore("IndexOfOptimum"):
2925             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2926         if selfA._toStore("CurrentOptimum"):
2927             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2928         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2929             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2930         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2931             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2932         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2933             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2934         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2935             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2936         return J
2937     #
2938     def GradientOfCostFunction(w):
2939         _W = numpy.asarray(w).reshape((-1,1))
2940         GradJb  = HBHTpR @ _W
2941         GradJo  = - Innovation
2942         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2943         return GradJ
2944     #
2945     # Minimization of the cost functional
2946     # --------------------------------
2947     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2948     #
2949     if selfA._parameters["Minimizer"] == "LBFGSB":
2950         if "0.19" <= scipy.version.version <= "1.1.0":
2951             import lbfgsbhlt as optimiseur
2952         else:
2953             import scipy.optimize as optimiseur
2954         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2955             func        = CostFunction,
2956             x0          = Xini,
2957             fprime      = GradientOfCostFunction,
2958             args        = (),
2959             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2960             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2961             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2962             iprint      = selfA._parameters["optiprint"],
2963             )
2964         nfeval = Informations['funcalls']
2965         rc     = Informations['warnflag']
2966     elif selfA._parameters["Minimizer"] == "TNC":
2967         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2968             func        = CostFunction,
2969             x0          = Xini,
2970             fprime      = GradientOfCostFunction,
2971             args        = (),
2972             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2973             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2974             ftol        = selfA._parameters["CostDecrementTolerance"],
2975             messages    = selfA._parameters["optmessages"],
2976             )
2977     elif selfA._parameters["Minimizer"] == "CG":
2978         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2979             f           = CostFunction,
2980             x0          = Xini,
2981             fprime      = GradientOfCostFunction,
2982             args        = (),
2983             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2984             gtol        = selfA._parameters["GradientNormTolerance"],
2985             disp        = selfA._parameters["optdisp"],
2986             full_output = True,
2987             )
2988     elif selfA._parameters["Minimizer"] == "NCG":
2989         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2990             f           = CostFunction,
2991             x0          = Xini,
2992             fprime      = GradientOfCostFunction,
2993             args        = (),
2994             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2995             avextol     = selfA._parameters["CostDecrementTolerance"],
2996             disp        = selfA._parameters["optdisp"],
2997             full_output = True,
2998             )
2999     elif selfA._parameters["Minimizer"] == "BFGS":
3000         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3001             f           = CostFunction,
3002             x0          = Xini,
3003             fprime      = GradientOfCostFunction,
3004             args        = (),
3005             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3006             gtol        = selfA._parameters["GradientNormTolerance"],
3007             disp        = selfA._parameters["optdisp"],
3008             full_output = True,
3009             )
3010     else:
3011         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3012     #
3013     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3014     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3015     #
3016     # Correction to work around a TNC bug on the returned Minimum
3017     # ----------------------------------------------------------------
3018     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3019         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3020     else:
3021         Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3022     #
3023     Xa = Minimum
3024     #--------------------------
3025     #
3026     selfA.StoredVariables["Analysis"].store( Xa )
3027     #
3028     if selfA._toStore("OMA") or \
3029         selfA._toStore("SigmaObs2") or \
3030         selfA._toStore("SimulationQuantiles") or \
3031         selfA._toStore("SimulatedObservationAtOptimum"):
3032         if selfA._toStore("SimulatedObservationAtCurrentState"):
3033             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3034         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3035             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3036         else:
3037             HXa = Hm( Xa )
3038     #
3039     if selfA._toStore("APosterioriCovariance") or \
3040         selfA._toStore("SimulationQuantiles") or \
3041         selfA._toStore("JacobianMatrixAtOptimum") or \
3042         selfA._toStore("KalmanGainAtOptimum"):
3043         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3044         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3045     if selfA._toStore("APosterioriCovariance") or \
3046         selfA._toStore("SimulationQuantiles") or \
3047         selfA._toStore("KalmanGainAtOptimum"):
3048         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3049         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3050     if selfA._toStore("APosterioriCovariance") or \
3051         selfA._toStore("SimulationQuantiles"):
3052         BI = B.getI()
3053         RI = R.getI()
3054         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3055     if selfA._toStore("APosterioriCovariance"):
3056         selfA.StoredVariables["APosterioriCovariance"].store( A )
3057     if selfA._toStore("JacobianMatrixAtOptimum"):
3058         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3059     if selfA._toStore("KalmanGainAtOptimum"):
3060         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3061         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3062         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3063     #
3064     # Additional computations and/or storage
3065     # ---------------------------------------
3066     if selfA._toStore("Innovation") or \
3067         selfA._toStore("SigmaObs2") or \
3068         selfA._toStore("MahalanobisConsistency") or \
3069         selfA._toStore("OMB"):
3070         d  = Y - HXb
3071     if selfA._toStore("Innovation"):
3072         selfA.StoredVariables["Innovation"].store( d )
3073     if selfA._toStore("BMA"):
3074         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3075     if selfA._toStore("OMA"):
3076         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3077     if selfA._toStore("OMB"):
3078         selfA.StoredVariables["OMB"].store( d )
3079     if selfA._toStore("SigmaObs2"):
3080         TraceR = R.trace(Y.size)
3081         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3082     if selfA._toStore("MahalanobisConsistency"):
3083         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3084     if selfA._toStore("SimulationQuantiles"):
3085         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3086     if selfA._toStore("SimulatedObservationAtBackground"):
3087         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3088     if selfA._toStore("SimulatedObservationAtOptimum"):
3089         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3090     #
3091     return 0
3092
3093 # ==============================================================================
3094 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3095     VariantM="KalmanFilterFormula16",
3096     Hybrid=None,
3097     ):
3098     """
3099     Stochastic EnKF
3100     """
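    # Principle (as implemented below): each member is updated with perturbed
    # observations, the Kalman gain being estimated either from the ensemble
    # covariances (VariantM "KalmanFilterFormula05") or from the anomaly matrices
    # (VariantM "KalmanFilterFormula16").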
3101     if selfA._parameters["EstimationOf"] == "Parameters":
3102         selfA._parameters["StoreInternalVariables"] = True
3103     #
3104     # Operators
3105     H = HO["Direct"].appliedControledFormTo
3106     #
3107     if selfA._parameters["EstimationOf"] == "State":
3108         M = EM["Direct"].appliedControledFormTo
3109     #
3110     if CM is not None and "Tangent" in CM and U is not None:
3111         Cm = CM["Tangent"].asMatrix(Xb)
3112     else:
3113         Cm = None
3114     #
3115     # Observation duration and sizes
3116     if hasattr(Y,"stepnumber"):
3117         duration = Y.stepnumber()
3118         __p = numpy.cumprod(Y.shape())[-1]
3119     else:
3120         duration = 2
3121         __p = numpy.array(Y).size
3122     #
3123     # Precompute the inverses of B and R
3124     if selfA._parameters["StoreInternalVariables"] \
3125         or selfA._toStore("CostFunctionJ") \
3126         or selfA._toStore("CostFunctionJb") \
3127         or selfA._toStore("CostFunctionJo") \
3128         or selfA._toStore("CurrentOptimum") \
3129         or selfA._toStore("APosterioriCovariance"):
3130         BI = B.getI()
3131         RI = R.getI()
3132     #
3133     __n = Xb.size
3134     __m = selfA._parameters["NumberOfMembers"]
3135     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
3136     previousJMinimum = numpy.finfo(float).max
3137     #
3138     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3139     else:                         Rn = R
3140     #
3141     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3142         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3143         else:                         Pn = B
3144         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3145         selfA.StoredVariables["Analysis"].store( Xb )
3146         if selfA._toStore("APosterioriCovariance"):
3147             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3148         selfA._setInternalState("seed", numpy.random.get_state())
3149     elif selfA._parameters["nextStep"]:
3150         Xn = selfA._getInternalState("Xn")
3151     #
3152     for step in range(duration-1):
3153         numpy.random.set_state(selfA._getInternalState("seed"))
3154         if hasattr(Y,"store"):
3155             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3156         else:
3157             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3158         #
3159         if U is not None:
3160             if hasattr(U,"store") and len(U)>1:
3161                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3162             elif hasattr(U,"store") and len(U)==1:
3163                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3164             else:
3165                 Un = numpy.ravel( U ).reshape((-1,1))
3166         else:
3167             Un = None
3168         #
3169         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3170             Xn = CovarianceInflation( Xn,
3171                 selfA._parameters["InflationType"],
3172                 selfA._parameters["InflationFactor"],
3173                 )
3174         #
3175         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3176             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3177                 argsAsSerie = True,
3178                 returnSerieAsArrayMatrix = True )
3179             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3180             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3181                 argsAsSerie = True,
3182                 returnSerieAsArrayMatrix = True )
3183             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is applied twice!
3184                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3185                 Xn_predicted = Xn_predicted + Cm @ Un
3186         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3187             # --- > By principle, M = Id, Q = 0
3188             Xn_predicted = EMX = Xn
3189             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3190                 argsAsSerie = True,
3191                 returnSerieAsArrayMatrix = True )
3192         #
3193         # Mean of forecast and observation of forecast
3194         Xfm  = EnsembleMean( Xn_predicted )
3195         Hfm  = EnsembleMean( HX_predicted )
3196         #
3197         #--------------------------
3198         if VariantM == "KalmanFilterFormula05":
3199             PfHT, HPfHT = 0., 0.
3200             for i in range(__m):
3201                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3202                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3203                 PfHT  += Exfi * Eyfi.T
3204                 HPfHT += Eyfi * Eyfi.T
3205             PfHT  = (1./(__m-1)) * PfHT
3206             HPfHT = (1./(__m-1)) * HPfHT
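            # Standard gain with ensemble-estimated covariances: Kn = PfH^T (R + H Pf H^T)^{-1}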
3207             Kn     = PfHT * ( R + HPfHT ).I
3208             del PfHT, HPfHT
3209             #
3210             for i in range(__m):
3211                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3212                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3213         #--------------------------
3214         elif VariantM == "KalmanFilterFormula16":
3215             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3216             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3217             #
3218             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3219             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3220             #
3221             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3222             #
3223             for i in range(__m):
3224                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3225         #--------------------------
3226         else:
3227             raise ValueError("VariantM has to be chosen in the authorized methods list.")
3228         #
3229         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3230             Xn = CovarianceInflation( Xn,
3231                 selfA._parameters["InflationType"],
3232                 selfA._parameters["InflationFactor"],
3233                 )
3234         #
3235         if Hybrid == "E3DVAR":
3236             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3237             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3238         #
3239         Xa = EnsembleMean( Xn )
3240         #--------------------------
3241         selfA._setInternalState("Xn", Xn)
3242         selfA._setInternalState("seed", numpy.random.get_state())
3243         #--------------------------
3244         #
3245         if selfA._parameters["StoreInternalVariables"] \
3246             or selfA._toStore("CostFunctionJ") \
3247             or selfA._toStore("CostFunctionJb") \
3248             or selfA._toStore("CostFunctionJo") \
3249             or selfA._toStore("APosterioriCovariance") \
3250             or selfA._toStore("InnovationAtCurrentAnalysis") \
3251             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3252             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3253             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3254             _Innovation = Ynpu - _HXa
3255         #
3256         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3257         # ---> avec analysis
3258         selfA.StoredVariables["Analysis"].store( Xa )
3259         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3260             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3261         if selfA._toStore("InnovationAtCurrentAnalysis"):
3262             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3263         # ---> avec current state
3264         if selfA._parameters["StoreInternalVariables"] \
3265             or selfA._toStore("CurrentState"):
3266             selfA.StoredVariables["CurrentState"].store( Xn )
3267         if selfA._toStore("ForecastState"):
3268             selfA.StoredVariables["ForecastState"].store( EMX )
3269         if selfA._toStore("ForecastCovariance"):
3270             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3271         if selfA._toStore("BMA"):
3272             selfA.StoredVariables["BMA"].store( EMX - Xa )
3273         if selfA._toStore("InnovationAtCurrentState"):
3274             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3275         if selfA._toStore("SimulatedObservationAtCurrentState") \
3276             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3277             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3278         # ---> autres
3279         if selfA._parameters["StoreInternalVariables"] \
3280             or selfA._toStore("CostFunctionJ") \
3281             or selfA._toStore("CostFunctionJb") \
3282             or selfA._toStore("CostFunctionJo") \
3283             or selfA._toStore("CurrentOptimum") \
3284             or selfA._toStore("APosterioriCovariance"):
3285             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3286             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3287             J   = Jb + Jo
3288             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3289             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3290             selfA.StoredVariables["CostFunctionJ" ].store( J )
3291             #
3292             if selfA._toStore("IndexOfOptimum") \
3293                 or selfA._toStore("CurrentOptimum") \
3294                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3295                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3296                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3297                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3298                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3299             if selfA._toStore("IndexOfOptimum"):
3300                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3301             if selfA._toStore("CurrentOptimum"):
3302                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3303             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3304                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3305             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3306                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3307             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3308                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3309             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3310                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3311         if selfA._toStore("APosterioriCovariance"):
3312             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3313         if selfA._parameters["EstimationOf"] == "Parameters" \
3314             and J < previousJMinimum:
3315             previousJMinimum    = J
3316             XaMin               = Xa
3317             if selfA._toStore("APosterioriCovariance"):
3318                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3319         # ---> Pour les smoothers
3320         if selfA._toStore("CurrentEnsembleState"):
3321             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3322     #
3323     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3324     # ----------------------------------------------------------------------
3325     if selfA._parameters["EstimationOf"] == "Parameters":
3326         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3327         selfA.StoredVariables["Analysis"].store( XaMin )
3328         if selfA._toStore("APosterioriCovariance"):
3329             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3330         if selfA._toStore("BMA"):
3331             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3332     #
3333     return 0
3334
3335 # ==============================================================================
3336 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3337     """
3338     3DVAR
3339     """
3340     #
3341     # Initialisations
3342     # ---------------
3343     Hm = HO["Direct"].appliedTo
3344     Ha = HO["Adjoint"].appliedInXTo
3345     #
3346     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3347         HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
3348     else:
3349         HXb = numpy.asarray(Hm( Xb ))
3350     HXb = HXb.reshape((-1,1))
3351     if Y.size != HXb.size:
3352         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3353     if max(Y.shape) != max(HXb.shape):
3354         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3355     #
3356     if selfA._toStore("JacobianMatrixAtBackground"):
3357         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3358         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3359         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3360     #
3361     BI = B.getI()
3362     RI = R.getI()
3363     #
3364     Xini = selfA._parameters["InitializationPoint"]
3365     #
3366     # Définition de la fonction-coût
3367     # ------------------------------
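         # The cost function evaluated below is the classical 3DVAR functional
         #   J(x) = 0.5*(x-Xb)'*B^-1*(x-Xb) + 0.5*(Y-H(x))'*R^-1*(Y-H(x))
         # with Jb the background term and Jo the observation term.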
3368     def CostFunction(x):
3369         _X  = numpy.asarray(x).reshape((-1,1))
3370         if selfA._parameters["StoreInternalVariables"] or \
3371             selfA._toStore("CurrentState") or \
3372             selfA._toStore("CurrentOptimum"):
3373             selfA.StoredVariables["CurrentState"].store( _X )
3374         _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
3375         _Innovation = Y - _HX
3376         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3377             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3378             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3379         if selfA._toStore("InnovationAtCurrentState"):
3380             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3381         #
3382         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3383         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3384         J   = Jb + Jo
3385         #
3386         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3387         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3388         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3389         selfA.StoredVariables["CostFunctionJ" ].store( J )
3390         if selfA._toStore("IndexOfOptimum") or \
3391             selfA._toStore("CurrentOptimum") or \
3392             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3393             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3394             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3395             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3396             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3397         if selfA._toStore("IndexOfOptimum"):
3398             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3399         if selfA._toStore("CurrentOptimum"):
3400             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3401         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3402             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3403         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3404             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3405         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3406             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3407         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3408             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3409         return J
3410     #
3411     def GradientOfCostFunction(x):
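             # Gradient of the 3DVAR functional, using the adjoint of H:
             #   grad J(x) = B^-1*(x-Xb) - H'*R^-1*(Y-H(x))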
3412         _X      = numpy.asarray(x).reshape((-1,1))
3413         _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
3414         GradJb  = BI * (_X - Xb)
3415         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3416         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3417         return GradJ
3418     #
3419     # Minimisation de la fonctionnelle
3420     # --------------------------------
3421     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3422     #
3423     if selfA._parameters["Minimizer"] == "LBFGSB":
3424         if "0.19" <= scipy.version.version <= "1.1.0":
3425             import lbfgsbhlt as optimiseur
3426         else:
3427             import scipy.optimize as optimiseur
3428         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3429             func        = CostFunction,
3430             x0          = Xini,
3431             fprime      = GradientOfCostFunction,
3432             args        = (),
3433             bounds      = selfA._parameters["Bounds"],
3434             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3435             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3436             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3437             iprint      = selfA._parameters["optiprint"],
3438             )
3439         nfeval = Informations['funcalls']
3440         rc     = Informations['warnflag']
3441     elif selfA._parameters["Minimizer"] == "TNC":
3442         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3443             func        = CostFunction,
3444             x0          = Xini,
3445             fprime      = GradientOfCostFunction,
3446             args        = (),
3447             bounds      = selfA._parameters["Bounds"],
3448             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3449             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3450             ftol        = selfA._parameters["CostDecrementTolerance"],
3451             messages    = selfA._parameters["optmessages"],
3452             )
3453     elif selfA._parameters["Minimizer"] == "CG":
3454         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3455             f           = CostFunction,
3456             x0          = Xini,
3457             fprime      = GradientOfCostFunction,
3458             args        = (),
3459             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3460             gtol        = selfA._parameters["GradientNormTolerance"],
3461             disp        = selfA._parameters["optdisp"],
3462             full_output = True,
3463             )
3464     elif selfA._parameters["Minimizer"] == "NCG":
3465         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3466             f           = CostFunction,
3467             x0          = Xini,
3468             fprime      = GradientOfCostFunction,
3469             args        = (),
3470             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3471             avextol     = selfA._parameters["CostDecrementTolerance"],
3472             disp        = selfA._parameters["optdisp"],
3473             full_output = True,
3474             )
3475     elif selfA._parameters["Minimizer"] == "BFGS":
3476         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3477             f           = CostFunction,
3478             x0          = Xini,
3479             fprime      = GradientOfCostFunction,
3480             args        = (),
3481             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3482             gtol        = selfA._parameters["GradientNormTolerance"],
3483             disp        = selfA._parameters["optdisp"],
3484             full_output = True,
3485             )
3486     else:
3487         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3488     #
3489     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3490     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3491     #
3492     # Correction pour pallier un bug de TNC sur le retour du Minimum
3493     # ----------------------------------------------------------------
3494     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3495         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3496     #
3497     Xa = Minimum
3498     #--------------------------
3499     #
3500     selfA.StoredVariables["Analysis"].store( Xa )
3501     #
3502     if selfA._toStore("OMA") or \
3503         selfA._toStore("SigmaObs2") or \
3504         selfA._toStore("SimulationQuantiles") or \
3505         selfA._toStore("SimulatedObservationAtOptimum"):
3506         if selfA._toStore("SimulatedObservationAtCurrentState"):
3507             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3508         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3509             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3510         else:
3511             HXa = Hm( Xa )
3512     #
3513     if selfA._toStore("APosterioriCovariance") or \
3514         selfA._toStore("SimulationQuantiles") or \
3515         selfA._toStore("JacobianMatrixAtOptimum") or \
3516         selfA._toStore("KalmanGainAtOptimum"):
3517         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3518         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3519     if selfA._toStore("APosterioriCovariance") or \
3520         selfA._toStore("SimulationQuantiles") or \
3521         selfA._toStore("KalmanGainAtOptimum"):
3522         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3523         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3524     if selfA._toStore("APosterioriCovariance") or \
3525         selfA._toStore("SimulationQuantiles"):
3526         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3527     if selfA._toStore("APosterioriCovariance"):
3528         selfA.StoredVariables["APosterioriCovariance"].store( A )
3529     if selfA._toStore("JacobianMatrixAtOptimum"):
3530         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3531     if selfA._toStore("KalmanGainAtOptimum"):
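             # Two algebraically equivalent forms of the gain: the first one
             # inverts in observation space and is cheaper when Y.size <= Xb.size,
             # the second one inverts in state space and is used otherwise.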
3532         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3533         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3534         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3535     #
3536     # Calculs et/ou stockages supplémentaires
3537     # ---------------------------------------
3538     if selfA._toStore("Innovation") or \
3539         selfA._toStore("SigmaObs2") or \
3540         selfA._toStore("MahalanobisConsistency") or \
3541         selfA._toStore("OMB"):
3542         d  = Y - HXb
3543     if selfA._toStore("Innovation"):
3544         selfA.StoredVariables["Innovation"].store( d )
3545     if selfA._toStore("BMA"):
3546         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3547     if selfA._toStore("OMA"):
3548         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3549     if selfA._toStore("OMB"):
3550         selfA.StoredVariables["OMB"].store( d )
3551     if selfA._toStore("SigmaObs2"):
3552         TraceR = R.trace(Y.size)
3553         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3554     if selfA._toStore("MahalanobisConsistency"):
3555         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3556     if selfA._toStore("SimulationQuantiles"):
3557         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3558     if selfA._toStore("SimulatedObservationAtBackground"):
3559         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3560     if selfA._toStore("SimulatedObservationAtOptimum"):
3561         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3562     #
3563     return 0
3564
3565 # ==============================================================================
3566 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3567     """
3568     4DVAR
3569     """
3570     #
3571     # Initialisations
3572     # ---------------
3573     #
3574     # Opérateurs
3575     Hm = HO["Direct"].appliedControledFormTo
3576     Mm = EM["Direct"].appliedControledFormTo
3577     #
3578     if CM is not None and "Tangent" in CM and U is not None:
3579         Cm = CM["Tangent"].asMatrix(Xb)
3580     else:
3581         Cm = None
3582     #
3583     def Un(_step):
3584         if U is not None:
3585             if hasattr(U,"store") and 1<=_step<len(U) :
3586                 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3587             elif hasattr(U,"store") and len(U)==1:
3588                 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3589             else:
3590                 _Un = numpy.ravel( U ).reshape((-1,1))
3591         else:
3592             _Un = None
3593         return _Un
3594     def CmUn(_xn,_un):
3595         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
3596             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3597             _CmUn = (_Cm @ _un).reshape((-1,1))
3598         else:
3599             _CmUn = 0.
3600         return _CmUn
3601     #
3602     # Remarque : les observations sont exploitées à partir du pas de temps
3603     # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
3604     # Donc le pas 0 n'est pas utilisé puisque la première étape commence
3605     # avec l'observation du pas 1.
3606     #
3607     # Nombre de pas identique au nombre de pas d'observations
3608     if hasattr(Y,"stepnumber"):
3609         duration = Y.stepnumber()
3610     else:
3611         duration = 2
3612     #
3613     # Précalcul des inversions de B et R
3614     BI = B.getI()
3615     RI = R.getI()
3616     #
3617     # Point de démarrage de l'optimisation
3618     Xini = selfA._parameters["InitializationPoint"]
3619     #
3620     # Définition de la fonction-coût
3621     # ------------------------------
3622     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
3623     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
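         # The cost function evaluated below is the strong constraint 4DVAR functional
         #   J(x0) = 0.5*(x0-Xb)'*B^-1*(x0-Xb) + 0.5*Sum_k (Yk-H(Mk(x0)))'*R^-1*(Yk-H(Mk(x0)))
         # the trajectory and the innovations being stored for the adjoint gradient.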
3624     def CostFunction(x):
3625         _X  = numpy.asarray(x).reshape((-1,1))
3626         if selfA._parameters["StoreInternalVariables"] or \
3627             selfA._toStore("CurrentState") or \
3628             selfA._toStore("CurrentOptimum"):
3629             selfA.StoredVariables["CurrentState"].store( _X )
3630         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3631         selfA.DirectCalculation = [None,]
3632         selfA.DirectInnovation  = [None,]
3633         Jo  = 0.
3634         _Xn = _X
3635         for step in range(0,duration-1):
3636             if hasattr(Y,"store"):
3637                 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3638             else:
3639                 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3640             _Un = Un(step)
3641             #
3642             # Etape d'évolution
3643             if selfA._parameters["EstimationOf"] == "State":
3644                 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3645             elif selfA._parameters["EstimationOf"] == "Parameters":
3646                 pass
3647             #
3648             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3649                 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3650             #
3651             # Etape de différence aux observations
3652             if selfA._parameters["EstimationOf"] == "State":
3653                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3654             elif selfA._parameters["EstimationOf"] == "Parameters":
3655                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3656             #
3657             # Stockage de l'état
3658             selfA.DirectCalculation.append( _Xn )
3659             selfA.DirectInnovation.append( _YmHMX )
3660             #
3661             # Ajout dans la fonctionnelle d'observation
3662             Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3663         J = Jb + Jo
3664         #
3665         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3666         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3667         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3668         selfA.StoredVariables["CostFunctionJ" ].store( J )
3669         if selfA._toStore("IndexOfOptimum") or \
3670             selfA._toStore("CurrentOptimum") or \
3671             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3672             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3673             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3674             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3675         if selfA._toStore("IndexOfOptimum"):
3676             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3677         if selfA._toStore("CurrentOptimum"):
3678             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3679         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3680             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3681         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3682             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3683         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3684             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3685         return J
3686     #
3687     def GradientOfCostFunction(x):
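             # Gradient by backward adjoint integration: each stored innovation
             # is mapped back to state space with the adjoint H', propagated
             # backward with the adjoint model M' and accumulated, then combined
             # with the background term B^-1*(x0-Xb).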
3688         _X      = numpy.asarray(x).reshape((-1,1))
3689         GradJb  = BI * (_X - Xb)
3690         GradJo  = 0.
3691         for step in range(duration-1,0,-1):
3692             # Étape de récupération du dernier stockage de l'évolution
3693             _Xn = selfA.DirectCalculation.pop()
3694             # Étape de récupération du dernier stockage de l'innovation
3695             _YmHMX = selfA.DirectInnovation.pop()
3696             # Calcul des adjoints
3697             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3698             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3699             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3700             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3701             # Calcul du gradient par état adjoint
3702             GradJo = GradJo + Ha * (RI * _YmHMX) # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
3703             GradJo = Ma * GradJo                 # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
3704         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3705         return GradJ
3706     #
3707     # Minimisation de la fonctionnelle
3708     # --------------------------------
3709     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3710     #
3711     if selfA._parameters["Minimizer"] == "LBFGSB":
3712         if "0.19" <= scipy.version.version <= "1.1.0":
3713             import lbfgsbhlt as optimiseur
3714         else:
3715             import scipy.optimize as optimiseur
3716         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3717             func        = CostFunction,
3718             x0          = Xini,
3719             fprime      = GradientOfCostFunction,
3720             args        = (),
3721             bounds      = selfA._parameters["Bounds"],
3722             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3723             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3724             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3725             iprint      = selfA._parameters["optiprint"],
3726             )
3727         nfeval = Informations['funcalls']
3728         rc     = Informations['warnflag']
3729     elif selfA._parameters["Minimizer"] == "TNC":
3730         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3731             func        = CostFunction,
3732             x0          = Xini,
3733             fprime      = GradientOfCostFunction,
3734             args        = (),
3735             bounds      = selfA._parameters["Bounds"],
3736             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3737             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3738             ftol        = selfA._parameters["CostDecrementTolerance"],
3739             messages    = selfA._parameters["optmessages"],
3740             )
3741     elif selfA._parameters["Minimizer"] == "CG":
3742         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3743             f           = CostFunction,
3744             x0          = Xini,
3745             fprime      = GradientOfCostFunction,
3746             args        = (),
3747             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3748             gtol        = selfA._parameters["GradientNormTolerance"],
3749             disp        = selfA._parameters["optdisp"],
3750             full_output = True,
3751             )
3752     elif selfA._parameters["Minimizer"] == "NCG":
3753         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3754             f           = CostFunction,
3755             x0          = Xini,
3756             fprime      = GradientOfCostFunction,
3757             args        = (),
3758             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3759             avextol     = selfA._parameters["CostDecrementTolerance"],
3760             disp        = selfA._parameters["optdisp"],
3761             full_output = True,
3762             )
3763     elif selfA._parameters["Minimizer"] == "BFGS":
3764         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3765             f           = CostFunction,
3766             x0          = Xini,
3767             fprime      = GradientOfCostFunction,
3768             args        = (),
3769             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3770             gtol        = selfA._parameters["GradientNormTolerance"],
3771             disp        = selfA._parameters["optdisp"],
3772             full_output = True,
3773             )
3774     else:
3775         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3776     #
3777     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3778     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3779     #
3780     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3781     # ----------------------------------------------------------------
3782     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3783         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3784     #
3785     # Obtention de l'analyse
3786     # ----------------------
3787     Xa = Minimum
3788     #
3789     selfA.StoredVariables["Analysis"].store( Xa )
3790     #
3791     # Calculs et/ou stockages supplémentaires
3792     # ---------------------------------------
3793     if selfA._toStore("BMA"):
3794         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3795     #
3796     return 0
3797
3798 # ==============================================================================
3799 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3800     """
3801     Standard Kalman Filter
3802     """
3803     if selfA._parameters["EstimationOf"] == "Parameters":
3804         selfA._parameters["StoreInternalVariables"] = True
3805     #
3806     # Opérateurs
3807     # ----------
3808     Ht = HO["Tangent"].asMatrix(Xb)
3809     Ha = HO["Adjoint"].asMatrix(Xb)
3810     #
3811     if selfA._parameters["EstimationOf"] == "State":
3812         Mt = EM["Tangent"].asMatrix(Xb)
3813         Ma = EM["Adjoint"].asMatrix(Xb)
3814     #
3815     if CM is not None and "Tangent" in CM and U is not None:
3816         Cm = CM["Tangent"].asMatrix(Xb)
3817     else:
3818         Cm = None
3819     #
3820     # Durée d'observation et tailles
3821     if hasattr(Y,"stepnumber"):
3822         duration = Y.stepnumber()
3823         __p = numpy.cumprod(Y.shape())[-1]
3824     else:
3825         duration = 2
3826         __p = numpy.array(Y).size
3827     #
3828     # Précalcul des inversions de B et R
3829     if selfA._parameters["StoreInternalVariables"] \
3830         or selfA._toStore("CostFunctionJ") \
3831         or selfA._toStore("CostFunctionJb") \
3832         or selfA._toStore("CostFunctionJo") \
3833         or selfA._toStore("CurrentOptimum") \
3834         or selfA._toStore("APosterioriCovariance"):
3835         BI = B.getI()
3836         RI = R.getI()
3837     #
3838     __n = Xb.size
3839     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
3840     #
3841     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3842         Xn = Xb
3843         Pn = B
3844         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3845         selfA.StoredVariables["Analysis"].store( Xb )
3846         if selfA._toStore("APosterioriCovariance"):
3847             if hasattr(B,"asfullmatrix"):
3848                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3849             else:
3850                 selfA.StoredVariables["APosterioriCovariance"].store( B )
3851         selfA._setInternalState("seed", numpy.random.get_state())
3852     elif selfA._parameters["nextStep"]:
3853         Xn = selfA._getInternalState("Xn")
3854         Pn = selfA._getInternalState("Pn")
3855     #
3856     if selfA._parameters["EstimationOf"] == "Parameters":
3857         XaMin            = Xn
3858         previousJMinimum = numpy.finfo(float).max
3859     #
3860     for step in range(duration-1):
3861         if hasattr(Y,"store"):
3862             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3863         else:
3864             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3865         #
3866         if U is not None:
3867             if hasattr(U,"store") and len(U)>1:
3868                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3869             elif hasattr(U,"store") and len(U)==1:
3870                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3871             else:
3872                 Un = numpy.ravel( U ).reshape((-1,1))
3873         else:
3874             Un = None
3875         #
3876         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
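                 # Forecast step with the tangent/adjoint of the evolution model:
                 #   Xf = M Xn (+ Cm Un), Pf = M Pn M' + Q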
3877             Xn_predicted = Mt @ Xn
3878             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3879                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3880                 Xn_predicted = Xn_predicted + Cm @ Un
3881             Pn_predicted = Q + Mt * (Pn * Ma)
3882         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3883             # --- > Par principe, M = Id, Q = 0
3884             Xn_predicted = Xn
3885             Pn_predicted = Pn
3886         #
3887         if selfA._parameters["EstimationOf"] == "State":
3888             HX_predicted = Ht @ Xn_predicted
3889             _Innovation  = Ynpu - HX_predicted
3890         elif selfA._parameters["EstimationOf"] == "Parameters":
3891             HX_predicted = Ht @ Xn_predicted
3892             _Innovation  = Ynpu - HX_predicted
3893             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3894                 _Innovation = _Innovation - Cm @ Un
3895         #
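             # Standard Kalman analysis step:
             #   K  = Pf H' (H Pf H' + R)^-1
             #   Xa = Xf + K (Y - H Xf)
             #   Pa = Pf - K H Pf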
3896         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3897         Xn = Xn_predicted + Kn * _Innovation
3898         Pn = Pn_predicted - Kn * Ht * Pn_predicted
3899         #
3900         Xa = Xn # Pointeurs
3901         #--------------------------
3902         selfA._setInternalState("Xn", Xn)
3903         selfA._setInternalState("Pn", Pn)
3904         #--------------------------
3905         #
3906         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3907         # ---> avec analysis
3908         selfA.StoredVariables["Analysis"].store( Xa )
3909         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3910             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3911         if selfA._toStore("InnovationAtCurrentAnalysis"):
3912             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3913         # ---> avec current state
3914         if selfA._parameters["StoreInternalVariables"] \
3915             or selfA._toStore("CurrentState"):
3916             selfA.StoredVariables["CurrentState"].store( Xn )
3917         if selfA._toStore("ForecastState"):
3918             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3919         if selfA._toStore("ForecastCovariance"):
3920             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3921         if selfA._toStore("BMA"):
3922             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3923         if selfA._toStore("InnovationAtCurrentState"):
3924             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3925         if selfA._toStore("SimulatedObservationAtCurrentState") \
3926             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3927             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3928         # ---> autres
3929         if selfA._parameters["StoreInternalVariables"] \
3930             or selfA._toStore("CostFunctionJ") \
3931             or selfA._toStore("CostFunctionJb") \
3932             or selfA._toStore("CostFunctionJo") \
3933             or selfA._toStore("CurrentOptimum") \
3934             or selfA._toStore("APosterioriCovariance"):
3935             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3936             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3937             J   = Jb + Jo
3938             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3939             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3940             selfA.StoredVariables["CostFunctionJ" ].store( J )
3941             #
3942             if selfA._toStore("IndexOfOptimum") \
3943                 or selfA._toStore("CurrentOptimum") \
3944                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3945                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3946                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3947                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3948                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3949             if selfA._toStore("IndexOfOptimum"):
3950                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3951             if selfA._toStore("CurrentOptimum"):
3952                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3953             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3954                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3955             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3956                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3957             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3958                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3959             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3960                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3961         if selfA._toStore("APosterioriCovariance"):
3962             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3963         if selfA._parameters["EstimationOf"] == "Parameters" \
3964             and J < previousJMinimum:
3965             previousJMinimum    = J
3966             XaMin               = Xa
3967             if selfA._toStore("APosterioriCovariance"):
3968                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3969     #
3970     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3971     # ----------------------------------------------------------------------
3972     if selfA._parameters["EstimationOf"] == "Parameters":
3973         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3974         selfA.StoredVariables["Analysis"].store( XaMin )
3975         if selfA._toStore("APosterioriCovariance"):
3976             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3977         if selfA._toStore("BMA"):
3978             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3979     #
3980     return 0
3981
3982 # ==============================================================================
3983 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3984     """
3985     Unscented Kalman Filter
3986     """
3987     if selfA._parameters["EstimationOf"] == "Parameters":
3988         selfA._parameters["StoreInternalVariables"] = True
3989     #
3990     L     = Xb.size
3991     Alpha = selfA._parameters["Alpha"]
3992     Beta  = selfA._parameters["Beta"]
3993     if selfA._parameters["Kappa"] == 0:
3994         if selfA._parameters["EstimationOf"] == "State":
3995             Kappa = 0
3996         elif selfA._parameters["EstimationOf"] == "Parameters":
3997             Kappa = 3 - L
3998     else:
3999         Kappa = selfA._parameters["Kappa"]
4000     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4001     Gamma  = math.sqrt( L + Lambda )
4002     #
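         # Standard unscented transform weights: Wm for the means and Wc for
         # the covariances of the 2L+1 sigma points scaled by Gamma = sqrt(L+Lambda).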
4003     Ww = []
4004     Ww.append( 0. )
4005     for i in range(2*L):
4006         Ww.append( 1. / (2.*(L + Lambda)) )
4007     #
4008     Wm = numpy.array( Ww )
4009     Wm[0] = Lambda / (L + Lambda)
4010     Wc = numpy.array( Ww )
4011     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
4012     #
4013     # Opérateurs
4014     Hm = HO["Direct"].appliedControledFormTo
4015     #
4016     if selfA._parameters["EstimationOf"] == "State":
4017         Mm = EM["Direct"].appliedControledFormTo
4018     #
4019     if CM is not None and "Tangent" in CM and U is not None:
4020         Cm = CM["Tangent"].asMatrix(Xb)
4021     else:
4022         Cm = None
4023     #
4024     # Durée d'observation et tailles
4025     if hasattr(Y,"stepnumber"):
4026         duration = Y.stepnumber()
4027         __p = numpy.cumprod(Y.shape())[-1]
4028     else:
4029         duration = 2
4030         __p = numpy.array(Y).size
4031     #
4032     # Précalcul des inversions de B et R
4033     if selfA._parameters["StoreInternalVariables"] \
4034         or selfA._toStore("CostFunctionJ") \
4035         or selfA._toStore("CostFunctionJb") \
4036         or selfA._toStore("CostFunctionJo") \
4037         or selfA._toStore("CurrentOptimum") \
4038         or selfA._toStore("APosterioriCovariance"):
4039         BI = B.getI()
4040         RI = R.getI()
4041     #
4042     __n = Xb.size
4043     nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
4044     #
4045     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4046         Xn = Xb
4047         if hasattr(B,"asfullmatrix"):
4048             Pn = B.asfullmatrix(__n)
4049         else:
4050             Pn = B
4051         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4052         selfA.StoredVariables["Analysis"].store( Xb )
4053         if selfA._toStore("APosterioriCovariance"):
4054             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4055     elif selfA._parameters["nextStep"]:
4056         Xn = selfA._getInternalState("Xn")
4057         Pn = selfA._getInternalState("Pn")
4058     #
4059     if selfA._parameters["EstimationOf"] == "Parameters":
4060         XaMin            = Xn
4061         previousJMinimum = numpy.finfo(float).max
4062     #
4063     for step in range(duration-1):
4064         if hasattr(Y,"store"):
4065             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4066         else:
4067             Ynpu = numpy.ravel( Y ).reshape((__p,1))
4068         #
4069         if U is not None:
4070             if hasattr(U,"store") and len(U)>1:
4071                 Un = numpy.ravel( U[step] ).reshape((-1,1))
4072             elif hasattr(U,"store") and len(U)==1:
4073                 Un = numpy.ravel( U[0] ).reshape((-1,1))
4074             else:
4075                 Un = numpy.ravel( U ).reshape((-1,1))
4076         else:
4077             Un = None
4078         #
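             # Generation of the 2L+1 sigma points around Xn, using a matrix
             # square root of Pn: [Xn, Xn + Gamma*sqrt(Pn), Xn - Gamma*sqrt(Pn)]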
4079         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4080         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4081         nbSpts = 2*Xn.size+1
4082         #
4083         XEtnnp = []
4084         for point in range(nbSpts):
4085             if selfA._parameters["EstimationOf"] == "State":
4086                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4087                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
4088                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4089                     XEtnnpi = XEtnnpi + Cm @ Un
4090             elif selfA._parameters["EstimationOf"] == "Parameters":
4091                 # --- > Par principe, M = Id, Q = 0
4092                 XEtnnpi = Xnp[:,point]
4093             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4094         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4095         #
4096         Xncm = ( XEtnnp * Wm ).sum(axis=1)
4097         #
4098         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
4099         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4100         for point in range(nbSpts):
4101             Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4102         #
4103         Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4104         #
4105         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4106         #
4107         Ynnp = []
4108         for point in range(nbSpts):
4109             if selfA._parameters["EstimationOf"] == "State":
4110                 Ynnpi = Hm( (Xnnp[:,point], None) )
4111             elif selfA._parameters["EstimationOf"] == "Parameters":
4112                 Ynnpi = Hm( (Xnnp[:,point], Un) )
4113             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4114         Ynnp = numpy.concatenate( Ynnp, axis=1 )
4115         #
4116         Yncm = ( Ynnp * Wm ).sum(axis=1)
4117         #
4118         Pyyn = R
4119         Pxyn = 0.
4120         for point in range(nbSpts):
4121             Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4122             Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4123         #
4124         _Innovation  = Ynpu - Yncm.reshape((-1,1))
4125         if selfA._parameters["EstimationOf"] == "Parameters":
4126             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
4127                 _Innovation = _Innovation - Cm @ Un
4128         #
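             # Unscented analysis step: the gain uses the cross covariance
             # between state and observation sigma points, K = Pxy Pyy^-1, then
             #   Xa = Xf + K (Y - Yf), Pa = Pf - K Pyy K'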
4129         Kn = Pxyn * Pyyn.I
4130         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4131         Pn = Pnm - Kn * Pyyn * Kn.T
4132         #
4133         Xa = Xn # Pointeurs
4134         #--------------------------
4135         selfA._setInternalState("Xn", Xn)
4136         selfA._setInternalState("Pn", Pn)
4137         #--------------------------
4138         #
4139         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4140         # ---> avec analysis
4141         selfA.StoredVariables["Analysis"].store( Xa )
4142         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4143             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4144         if selfA._toStore("InnovationAtCurrentAnalysis"):
4145             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4146         # ---> avec current state
4147         if selfA._parameters["StoreInternalVariables"] \
4148             or selfA._toStore("CurrentState"):
4149             selfA.StoredVariables["CurrentState"].store( Xn )
4150         if selfA._toStore("ForecastState"):
4151             selfA.StoredVariables["ForecastState"].store( Xncm )
4152         if selfA._toStore("ForecastCovariance"):
4153             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4154         if selfA._toStore("BMA"):
4155             selfA.StoredVariables["BMA"].store( Xncm - Xa )
4156         if selfA._toStore("InnovationAtCurrentState"):
4157             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4158         if selfA._toStore("SimulatedObservationAtCurrentState") \
4159             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4160             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4161         # ---> autres
4162         if selfA._parameters["StoreInternalVariables"] \
4163             or selfA._toStore("CostFunctionJ") \
4164             or selfA._toStore("CostFunctionJb") \
4165             or selfA._toStore("CostFunctionJo") \
4166             or selfA._toStore("CurrentOptimum") \
4167             or selfA._toStore("APosterioriCovariance"):
4168             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4169             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4170             J   = Jb + Jo
4171             selfA.StoredVariables["CostFunctionJb"].store( Jb )
4172             selfA.StoredVariables["CostFunctionJo"].store( Jo )
4173             selfA.StoredVariables["CostFunctionJ" ].store( J )
4174             #
4175             if selfA._toStore("IndexOfOptimum") \
4176                 or selfA._toStore("CurrentOptimum") \
4177                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4178                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4179                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4180                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4181                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4182             if selfA._toStore("IndexOfOptimum"):
4183                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4184             if selfA._toStore("CurrentOptimum"):
4185                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4186             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4187                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4188             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4189                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4190             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4191                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4192             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4193                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4194         if selfA._toStore("APosterioriCovariance"):
4195             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4196         if selfA._parameters["EstimationOf"] == "Parameters" \
4197             and J < previousJMinimum:
4198             previousJMinimum    = J
4199             XaMin               = Xa
4200             if selfA._toStore("APosterioriCovariance"):
4201                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4202     #
4203     # Stockage final supplémentaire de l'optimum en estimation de paramètres
4204     # ----------------------------------------------------------------------
4205     if selfA._parameters["EstimationOf"] == "Parameters":
4206         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4207         selfA.StoredVariables["Analysis"].store( XaMin )
4208         if selfA._toStore("APosterioriCovariance"):
4209             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4210         if selfA._toStore("BMA"):
4211             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4212     #
4213     return 0
4214
4215 # ==============================================================================
4216 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4217     """
4218     3DVAR variational analysis with no inversion of B
4219     """
4220     #
4221     # Initialisations
4222     # ---------------
4223     Hm = HO["Direct"].appliedTo
4224     Ha = HO["Adjoint"].appliedInXTo
4225     #
4226     BT = B.getT()
4227     RI = R.getI()
4228     #
4229     Xini = numpy.zeros(Xb.size)
4230     #
4231     # Définition de la fonction-coût
4232     # ------------------------------
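         # Change of variable x = Xb + B v ("B preconditioning"): the background
         # term becomes Jb(v) = 0.5*v'*B*v, so that only B, and not B^-1, is needed.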
4233     def CostFunction(v):
4234         _V = numpy.asarray(v).reshape((-1,1))
4235         _X = Xb + (B @ _V).reshape((-1,1))
4236         if selfA._parameters["StoreInternalVariables"] or \
4237             selfA._toStore("CurrentState") or \
4238             selfA._toStore("CurrentOptimum"):
4239             selfA.StoredVariables["CurrentState"].store( _X )
4240         _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
4241         _Innovation = Y - _HX
4242         if selfA._toStore("SimulatedObservationAtCurrentState") or \
4243             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4244             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4245         if selfA._toStore("InnovationAtCurrentState"):
4246             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4247         #
4248         Jb  = float( 0.5 * _V.T * (BT * _V) )
4249         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4250         J   = Jb + Jo
4251         #
4252         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4253         selfA.StoredVariables["CostFunctionJb"].store( Jb )
4254         selfA.StoredVariables["CostFunctionJo"].store( Jo )
4255         selfA.StoredVariables["CostFunctionJ" ].store( J )
4256         if selfA._toStore("IndexOfOptimum") or \
4257             selfA._toStore("CurrentOptimum") or \
4258             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4259             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4260             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4261             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4262             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4263         if selfA._toStore("IndexOfOptimum"):
4264             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4265         if selfA._toStore("CurrentOptimum"):
4266             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4267         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4268             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4269         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4270             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4271         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4272             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4273         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4274             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4275         return J
4276     #
4277     def GradientOfCostFunction(v):
4278         _V = numpy.asarray(v).reshape((-1,1))
4279         _X = Xb + (B @ _V).reshape((-1,1))
4280         _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
4281         GradJb  = BT * _V
4282         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
4283         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4284         return GradJ
4285     #
4286     # Minimization of the functional
4287     # ------------------------------
4288     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4289     #
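    # For scipy versions between 0.19 and 1.1.0 the L-BFGS-B minimizer is taken
    # from the locally provided "lbfgsbhlt" module (presumably a corrected
    # wrapper) instead of scipy.optimize; both expose the same fmin_l_bfgs_b
    # interface.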
4290     if selfA._parameters["Minimizer"] == "LBFGSB":
4291         if "0.19" <= scipy.version.version <= "1.1.0":
4292             import lbfgsbhlt as optimiseur
4293         else:
4294             import scipy.optimize as optimiseur
4295         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4296             func        = CostFunction,
4297             x0          = Xini,
4298             fprime      = GradientOfCostFunction,
4299             args        = (),
4300             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4301             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
4302             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
4303             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4304             iprint      = selfA._parameters["optiprint"],
4305             )
4306         nfeval = Informations['funcalls']
4307         rc     = Informations['warnflag']
4308     elif selfA._parameters["Minimizer"] == "TNC":
4309         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4310             func        = CostFunction,
4311             x0          = Xini,
4312             fprime      = GradientOfCostFunction,
4313             args        = (),
4314             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4315             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
4316             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4317             ftol        = selfA._parameters["CostDecrementTolerance"],
4318             messages    = selfA._parameters["optmessages"],
4319             )
4320     elif selfA._parameters["Minimizer"] == "CG":
4321         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4322             f           = CostFunction,
4323             x0          = Xini,
4324             fprime      = GradientOfCostFunction,
4325             args        = (),
4326             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4327             gtol        = selfA._parameters["GradientNormTolerance"],
4328             disp        = selfA._parameters["optdisp"],
4329             full_output = True,
4330             )
4331     elif selfA._parameters["Minimizer"] == "NCG":
4332         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4333             f           = CostFunction,
4334             x0          = Xini,
4335             fprime      = GradientOfCostFunction,
4336             args        = (),
4337             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4338             avextol     = selfA._parameters["CostDecrementTolerance"],
4339             disp        = selfA._parameters["optdisp"],
4340             full_output = True,
4341             )
4342     elif selfA._parameters["Minimizer"] == "BFGS":
4343         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4344             f           = CostFunction,
4345             x0          = Xini,
4346             fprime      = GradientOfCostFunction,
4347             args        = (),
4348             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4349             gtol        = selfA._parameters["GradientNormTolerance"],
4350             disp        = selfA._parameters["optdisp"],
4351             full_output = True,
4352             )
4353     else:
4354         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4355     #
4356     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4357     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4358     #
4359     # Correction to work around a TNC bug in the returned Minimum
4360     # ------------------------------------------------------------
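    # (the optimum is re-read from the stored states when available; otherwise
    # the optimal control variable returned by the minimizer is mapped back to
    # the full space through Xa = Xb + B v)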
4361     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4362         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4363     else:
4364         Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @
4365     #
4366     Xa = Minimum
4367     #--------------------------
4368     #
4369     selfA.StoredVariables["Analysis"].store( Xa )
4370     #
4371     if selfA._toStore("OMA") or \
4372         selfA._toStore("SigmaObs2") or \
4373         selfA._toStore("SimulationQuantiles") or \
4374         selfA._toStore("SimulatedObservationAtOptimum"):
4375         if selfA._toStore("SimulatedObservationAtCurrentState"):
4376             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4377         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4378             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4379         else:
4380             HXa = Hm( Xa )
4381     #
4382     if selfA._toStore("APosterioriCovariance") or \
4383         selfA._toStore("SimulationQuantiles") or \
4384         selfA._toStore("JacobianMatrixAtOptimum") or \
4385         selfA._toStore("KalmanGainAtOptimum"):
4386         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4387         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4388     if selfA._toStore("APosterioriCovariance") or \
4389         selfA._toStore("SimulationQuantiles") or \
4390         selfA._toStore("KalmanGainAtOptimum"):
4391         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4392         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4393     if selfA._toStore("APosterioriCovariance") or \
4394         selfA._toStore("SimulationQuantiles"):
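        # The a posteriori covariance is taken as the inverse of the (Gauss-Newton)
        # Hessian of the cost function, A ~ (B^{-1} + H^T R^{-1} H)^{-1}, assuming
        # this is what HessienneEstimation builds from (HaM, HtM, BI, RI).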
4395         BI = B.getI()
4396         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4397     if selfA._toStore("APosterioriCovariance"):
4398         selfA.StoredVariables["APosterioriCovariance"].store( A )
4399     if selfA._toStore("JacobianMatrixAtOptimum"):
4400         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4401     if selfA._toStore("KalmanGainAtOptimum"):
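        # Two algebraically equivalent forms of the gain are used, chosen from the
        # relative sizes of the observation and state spaces:
        #   K = B H^T (R + H B H^T)^{-1}                 if dim(Y) <= dim(X)
        #   K = (B^{-1} + H^T R^{-1} H)^{-1} H^T R^{-1}  otherwise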
4402         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4403         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4404         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4405     #
4406     # Additional calculations and/or storage
4407     # ---------------------------------------
4408     if selfA._toStore("Innovation") or \
4409         selfA._toStore("SigmaObs2") or \
4410         selfA._toStore("MahalanobisConsistency") or \
4411         selfA._toStore("OMB"):
4412         d  = Y - HXb
4413     if selfA._toStore("Innovation"):
4414         selfA.StoredVariables["Innovation"].store( d )
4415     if selfA._toStore("BMA"):
4416         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4417     if selfA._toStore("OMA"):
4418         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4419     if selfA._toStore("OMB"):
4420         selfA.StoredVariables["OMB"].store( d )
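    # SigmaObs2 stores a scalar estimate of the observation error variance,
    # computed as d^T (Y - H(Xa)) / Tr(R) with the innovation d = Y - H(Xb).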
4421     if selfA._toStore("SigmaObs2"):
4422         TraceR = R.trace(Y.size)
4423         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
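    # The Mahalanobis consistency indicator 2 J(Xa) / dim(Y) is expected to be
    # close to 1 when the error statistics B and R are consistent with the data.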
4424     if selfA._toStore("MahalanobisConsistency"):
4425         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
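    # Simulation quantiles are delegated to QuantilesEstimations, which is assumed
    # to sample states consistent with A around Xa and to propagate them through
    # the (possibly linearized) observation operator.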
4426     if selfA._toStore("SimulationQuantiles"):
4427         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4428     if selfA._toStore("SimulatedObservationAtBackground"):
4429         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4430     if selfA._toStore("SimulatedObservationAtOptimum"):
4431         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4432     #
4433     return 0
4434
4435 # ==============================================================================
4436 if __name__ == "__main__":
4437     print('\n AUTODIAGNOSTIC\n')