Salome HOME
Minor documentation and code review corrections (3)
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2008-2021 EDF R&D
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
18 #
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
20 #
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
22
23 __doc__ = """
24     Définit les objets numériques génériques.
25 """
26 __author__ = "Jean-Philippe ARGAUD"
27
28 import os, time, copy, types, sys, logging
29 import math, numpy, scipy, scipy.optimize, scipy.version
30 from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
31 from daCore.PlatformInfo import PlatformInfo
32 mpr = PlatformInfo().MachinePrecision()
33 mfp = PlatformInfo().MaximumPrecision()
34 # logging.getLogger().setLevel(logging.DEBUG)
35
36 # ==============================================================================
37 def ExecuteFunction( triplet ):
38     assert len(triplet) == 3, "Incorrect number of arguments"
39     X, xArgs, funcrepr = triplet
40     __X = numpy.ravel( X ).reshape((-1,1))
41     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
42     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
43     __fonction = getattr(__module,funcrepr["__userFunction__name"])
44     sys.path = __sys_path_tmp ; del __sys_path_tmp
45     if isinstance(xArgs, dict):
46         __HX  = __fonction( __X, **xArgs )
47     else:
48         __HX  = __fonction( __X )
49     return numpy.ravel( __HX )
50
51 # ==============================================================================
52 class FDApproximation(object):
53     """
54     Cette classe sert d'interface pour définir les opérateurs approximés. A la
55     création d'un objet, en fournissant une fonction "Function", on obtient un
56     objet qui dispose de 3 méthodes "DirectOperator", "TangentOperator" et
57     "AdjointOperator". On contrôle l'approximation DF avec l'incrément
58     multiplicatif "increment" valant par défaut 1%, ou avec l'incrément fixe
59     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
60     centrées si le booléen "centeredDF" est vrai.
61     """
62     def __init__(self,
63             name                  = "FDApproximation",
64             Function              = None,
65             centeredDF            = False,
66             increment             = 0.01,
67             dX                    = None,
68             extraArguments        = None,
69             reducingMemoryUse     = False,
70             avoidingRedundancy    = True,
71             toleranceInRedundancy = 1.e-18,
72             lenghtOfRedundancy    = -1,
73             mpEnabled             = False,
74             mpWorkers             = None,
75             mfEnabled             = False,
76             ):
77         self.__name = str(name)
78         self.__extraArgs = extraArguments
79         #
80         if mpEnabled:
81             try:
82                 import multiprocessing
83                 self.__mpEnabled = True
84             except ImportError:
85                 self.__mpEnabled = False
86         else:
87             self.__mpEnabled = False
88         self.__mpWorkers = mpWorkers
89         if self.__mpWorkers is not None and self.__mpWorkers < 1:
90             self.__mpWorkers = None
91         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
92         #
93         self.__mfEnabled = bool(mfEnabled)
94         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
95         #
96         self.__rmEnabled = bool(reducingMemoryUse)
97         logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
98         #
99         if avoidingRedundancy:
100             self.__avoidRC = True
101             self.__tolerBP = float(toleranceInRedundancy)
102             self.__lenghtRJ = int(lenghtOfRedundancy)
103             self.__listJPCP = [] # Jacobian Previous Calculated Points
104             self.__listJPCI = [] # Jacobian Previous Calculated Increment
105             self.__listJPCR = [] # Jacobian Previous Calculated Results
106             self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
107             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
108         else:
109             self.__avoidRC = False
110         logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
111         if self.__avoidRC:
112             logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
113         #
114         if self.__mpEnabled:
115             if isinstance(Function,types.FunctionType):
116                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
117                 self.__userFunction__name = Function.__name__
118                 try:
119                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
120                 except:
121                     mod = os.path.abspath(Function.__globals__['__file__'])
122                 if not os.path.isfile(mod):
123                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
124                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
125                 self.__userFunction__path = os.path.dirname(mod)
126                 del mod
127                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
128                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
129             elif isinstance(Function,types.MethodType):
130                 logging.debug("FDA Calculs en multiprocessing : MethodType")
131                 self.__userFunction__name = Function.__name__
132                 try:
133                     mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
134                 except:
135                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
136                 if not os.path.isfile(mod):
137                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
138                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
139                 self.__userFunction__path = os.path.dirname(mod)
140                 del mod
141                 self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
142                 self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
143             else:
144                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
145         else:
146             self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
147             self.__userFunction = self.__userOperator.appliedTo
148         #
149         self.__centeredDF = bool(centeredDF)
150         if abs(float(increment)) > 1.e-15:
151             self.__increment  = float(increment)
152         else:
153             self.__increment  = 0.01
154         if dX is None:
155             self.__dX     = None
156         else:
157             self.__dX     = numpy.ravel( dX )
158
159     # ---------------------------------------------------------
160     def __doublon__(self, e, l, n, v=None):
161         __ac, __iac = False, -1
162         for i in range(len(l)-1,-1,-1):
163             if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
164                 __ac, __iac = True, i
165                 if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
166                 break
167         return __ac, __iac
168
169     # ---------------------------------------------------------
170     def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
171         "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
172         if not isinstance(__LMatrix, (list,tuple)):
173             raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
174         if __dotWith is not None:
175             __Idwx = numpy.ravel( __dotWith )
176             assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
177             __Produit = numpy.zeros(__LMatrix[0].size)
178             for i, col in enumerate(__LMatrix):
179                 __Produit += float(__Idwx[i]) * col
180             return __Produit
181         elif __dotTWith is not None:
182             _Idwy = numpy.ravel( __dotTWith ).T
183             assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
184             __Produit = numpy.zeros(len(__LMatrix))
185             for i, col in enumerate(__LMatrix):
186                 __Produit[i] = float( _Idwy @ col)
187             return __Produit
188         else:
189             __Produit = None
190         return __Produit
191
192     # ---------------------------------------------------------
193     def DirectOperator(self, X, **extraArgs ):
194         """
195         Calcul du direct à l'aide de la fonction fournie.
196
197         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
198         ne doivent pas être données ici à la fonction utilisateur.
199         """
200         logging.debug("FDA Calcul DirectOperator (explicite)")
201         if self.__mfEnabled:
202             _HX = self.__userFunction( X, argsAsSerie = True )
203         else:
204             _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
205         #
206         return _HX
207
208     # ---------------------------------------------------------
209     def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
210         """
211         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
212         c'est-à-dire le gradient de H en X. On utilise des différences finies
213         directionnelles autour du point X. X est un numpy.ndarray.
214
215         Différences finies centrées (approximation d'ordre 2):
216         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
217            dX[i] à la  composante X[i], pour composer X_plus_dXi et X_moins_dXi, et
218            on calcule les réponses HX_plus_dXi = H( X_plus_dXi ) et HX_moins_dXi =
219            H( X_moins_dXi )
220         2/ On effectue les différences (HX_plus_dXi-HX_moins_dXi) et on divise par
221            le pas 2*dXi
222         3/ Chaque résultat, par composante, devient une colonne de la Jacobienne
223
224         Différences finies non centrées (approximation d'ordre 1):
225         1/ Pour chaque composante i de X, on ajoute la perturbation dX[i] à la
226            composante X[i] pour composer X_plus_dXi, et on calcule la réponse
227            HX_plus_dXi = H( X_plus_dXi )
228         2/ On calcule la valeur centrale HX = H(X)
229         3/ On effectue les différences (HX_plus_dXi-HX) et on divise par
230            le pas dXi
231         4/ Chaque résultat, par composante, devient une colonne de la Jacobienne
232
233         """
234         logging.debug("FDA Début du calcul de la Jacobienne")
235         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
236         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
237         #
238         if X is None or len(X)==0:
239             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
240         #
241         _X = numpy.ravel( X )
242         #
243         if self.__dX is None:
244             _dX  = self.__increment * _X
245         else:
246             _dX = numpy.ravel( self.__dX )
247         assert len(_X) == len(_dX), "Inconsistent dX increment length with respect to the X one"
248         assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
249         #
250         if (_dX == 0.).any():
251             moyenne = _dX.mean()
252             if moyenne == 0.:
253                 _dX = numpy.where( _dX == 0., float(self.__increment), _dX )
254             else:
255                 _dX = numpy.where( _dX == 0., moyenne, _dX )
256         #
257         __alreadyCalculated  = False
258         if self.__avoidRC:
259             __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
260             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
261             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
262                 __alreadyCalculated, __i = True, __alreadyCalculatedP
263                 logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
264         #
265         if __alreadyCalculated:
266             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
267             _Jacobienne = self.__listJPCR[__i]
268             logging.debug("FDA Fin du calcul de la Jacobienne")
269             if dotWith is not None:
270                 return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
271             elif dotTWith is not None:
272                 return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
273         else:
274             logging.debug("FDA   Calcul Jacobienne (explicite)")
275             if self.__centeredDF:
276                 #
277                 if self.__mpEnabled and not self.__mfEnabled:
278                     funcrepr = {
279                         "__userFunction__path" : self.__userFunction__path,
280                         "__userFunction__modl" : self.__userFunction__modl,
281                         "__userFunction__name" : self.__userFunction__name,
282                     }
283                     _jobs = []
284                     for i in range( len(_dX) ):
285                         _dXi            = _dX[i]
286                         _X_plus_dXi     = numpy.array( _X, dtype=float )
287                         _X_plus_dXi[i]  = _X[i] + _dXi
288                         _X_moins_dXi    = numpy.array( _X, dtype=float )
289                         _X_moins_dXi[i] = _X[i] - _dXi
290                         #
291                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
292                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
293                     #
294                     import multiprocessing
295                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
296                     _HX_plusmoins_dX = self.__pool.map( ExecuteFunction, _jobs )
297                     self.__pool.close()
298                     self.__pool.join()
299                     #
300                     _Jacobienne  = []
301                     for i in range( len(_dX) ):
302                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
303                     #
304                 elif self.__mfEnabled:
305                     _xserie = []
306                     for i in range( len(_dX) ):
307                         _dXi            = _dX[i]
308                         _X_plus_dXi     = numpy.array( _X, dtype=float )
309                         _X_plus_dXi[i]  = _X[i] + _dXi
310                         _X_moins_dXi    = numpy.array( _X, dtype=float )
311                         _X_moins_dXi[i] = _X[i] - _dXi
312                         #
313                         _xserie.append( _X_plus_dXi )
314                         _xserie.append( _X_moins_dXi )
315                     #
316                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
317                      #
318                     _Jacobienne  = []
319                     for i in range( len(_dX) ):
320                         _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
321                     #
322                 else:
323                     _Jacobienne  = []
324                     for i in range( _dX.size ):
325                         _dXi            = _dX[i]
326                         _X_plus_dXi     = numpy.array( _X, dtype=float )
327                         _X_plus_dXi[i]  = _X[i] + _dXi
328                         _X_moins_dXi    = numpy.array( _X, dtype=float )
329                         _X_moins_dXi[i] = _X[i] - _dXi
330                         #
331                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
332                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
333                         #
334                         _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
335                 #
336             else:
337                 #
338                 if self.__mpEnabled and not self.__mfEnabled:
339                     funcrepr = {
340                         "__userFunction__path" : self.__userFunction__path,
341                         "__userFunction__modl" : self.__userFunction__modl,
342                         "__userFunction__name" : self.__userFunction__name,
343                     }
344                     _jobs = []
345                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
346                     for i in range( len(_dX) ):
347                         _X_plus_dXi    = numpy.array( _X, dtype=float )
348                         _X_plus_dXi[i] = _X[i] + _dX[i]
349                         #
350                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
351                     #
352                     import multiprocessing
353                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
354                     _HX_plus_dX = self.__pool.map( ExecuteFunction, _jobs )
355                     self.__pool.close()
356                     self.__pool.join()
357                     #
358                     _HX = _HX_plus_dX.pop(0)
359                     #
360                     _Jacobienne = []
361                     for i in range( len(_dX) ):
362                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
363                     #
364                 elif self.__mfEnabled:
365                     _xserie = []
366                     _xserie.append( _X )
367                     for i in range( len(_dX) ):
368                         _X_plus_dXi    = numpy.array( _X, dtype=float )
369                         _X_plus_dXi[i] = _X[i] + _dX[i]
370                         #
371                         _xserie.append( _X_plus_dXi )
372                     #
373                     _HX_plus_dX = self.DirectOperator( _xserie )
374                     #
375                     _HX = _HX_plus_dX.pop(0)
376                     #
377                     _Jacobienne = []
378                     for i in range( len(_dX) ):
379                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
380                    #
381                 else:
382                     _Jacobienne  = []
383                     _HX = self.DirectOperator( _X )
384                     for i in range( _dX.size ):
385                         _dXi            = _dX[i]
386                         _X_plus_dXi     = numpy.array( _X, dtype=float )
387                         _X_plus_dXi[i]  = _X[i] + _dXi
388                         #
389                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
390                         #
391                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
392             #
393             if (dotWith is not None) or (dotTWith is not None):
394                 __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
395             else:
396                 __Produit = None
397             if __Produit is None or self.__avoidRC:
398                 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
399                 if self.__avoidRC:
400                     if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
401                     while len(self.__listJPCP) > self.__lenghtRJ:
402                         self.__listJPCP.pop(0)
403                         self.__listJPCI.pop(0)
404                         self.__listJPCR.pop(0)
405                         self.__listJPPN.pop(0)
406                         self.__listJPIN.pop(0)
407                     self.__listJPCP.append( copy.copy(_X) )
408                     self.__listJPCI.append( copy.copy(_dX) )
409                     self.__listJPCR.append( copy.copy(_Jacobienne) )
410                     self.__listJPPN.append( numpy.linalg.norm(_X) )
411                     self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
412             logging.debug("FDA Fin du calcul de la Jacobienne")
413             if __Produit is not None:
414                 return __Produit
415         #
416         return _Jacobienne
417
418     # ---------------------------------------------------------
419     def TangentOperator(self, paire, **extraArgs ):
420         """
421         Calcul du tangent à l'aide de la Jacobienne.
422
423         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
424         ne doivent pas être données ici à la fonction utilisateur.
425         """
426         if self.__mfEnabled:
427             assert len(paire) == 1, "Incorrect length of arguments"
428             _paire = paire[0]
429             assert len(_paire) == 2, "Incorrect number of arguments"
430         else:
431             assert len(paire) == 2, "Incorrect number of arguments"
432             _paire = paire
433         X, dX = _paire
434         if dX is None or len(dX) == 0:
435             #
436             # Calcul de la forme matricielle si le second argument est None
437             # -------------------------------------------------------------
438             _Jacobienne = self.TangentMatrix( X )
439             if self.__mfEnabled: return [_Jacobienne,]
440             else:                return _Jacobienne
441         else:
442             #
443             # Calcul de la valeur linéarisée de H en X appliqué à dX
444             # ------------------------------------------------------
445             _HtX = self.TangentMatrix( X, dotWith = dX )
446             if self.__mfEnabled: return [_HtX,]
447             else:                return _HtX
448
449     # ---------------------------------------------------------
450     def AdjointOperator(self, paire, **extraArgs ):
451         """
452         Calcul de l'adjoint à l'aide de la Jacobienne.
453
454         NB : les extraArgs sont là pour assurer la compatibilité d'appel, mais
455         ne doivent pas être données ici à la fonction utilisateur.
456         """
457         if self.__mfEnabled:
458             assert len(paire) == 1, "Incorrect length of arguments"
459             _paire = paire[0]
460             assert len(_paire) == 2, "Incorrect number of arguments"
461         else:
462             assert len(paire) == 2, "Incorrect number of arguments"
463             _paire = paire
464         X, Y = _paire
465         if Y is None or len(Y) == 0:
466             #
467             # Calcul de la forme matricielle si le second argument est None
468             # -------------------------------------------------------------
469             _JacobienneT = self.TangentMatrix( X ).T
470             if self.__mfEnabled: return [_JacobienneT,]
471             else:                return _JacobienneT
472         else:
473             #
474             # Calcul de la valeur de l'adjoint en X appliqué à Y
475             # --------------------------------------------------
476             _HaY = self.TangentMatrix( X, dotTWith = Y )
477             if self.__mfEnabled: return [_HaY,]
478             else:                return _HaY
479
480 # ==============================================================================
481 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
482     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
483     #
484     _bgcenter = numpy.ravel(_bgcenter)[:,None]
485     if _nbmembers < 1:
486         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
487     #
488     if _bgcovariance is None:
489         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
490     else:
491         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
492         _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
493     #
494     return _Perturbations
495
496 # ==============================================================================
497 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
498     "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
499     def __CenteredRandomAnomalies(Zr, N):
500         """
501         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
502         notes manuscrites de MB et conforme au code de PS avec eps = -1
503         """
504         eps = -1
505         Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
506         Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
507         R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
508         Q = numpy.dot(Q,R)
509         Zr = numpy.dot(Q,Zr)
510         return Zr.T
511     #
512     _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
513     if _nbmembers < 1:
514         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
515     if _bgcovariance is None:
516         _Perturbations = numpy.tile( _bgcenter, _nbmembers)
517     else:
518         if _withSVD:
519             _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
520             _nbctl = _bgcenter.size
521             if _nbmembers > _nbctl:
522                 _Z = numpy.concatenate((numpy.dot(
523                     numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
524                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
525             else:
526                 _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
527             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
528             _Perturbations = _bgcenter + _Zca
529         else:
530             if max(abs(_bgcovariance.flatten())) > 0:
531                 _nbctl = _bgcenter.size
532                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
533                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
534                 _Perturbations = _bgcenter + _Zca
535             else:
536                 _Perturbations = numpy.tile( _bgcenter, _nbmembers)
537     #
538     return _Perturbations
539
540 # ==============================================================================
541 def EnsembleMean( __Ensemble ):
542     "Renvoie la moyenne empirique d'un ensemble"
543     return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
544
545 # ==============================================================================
546 def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
547     "Renvoie les anomalies centrées à partir d'un ensemble"
548     if __OptMean is None:
549         __Em = EnsembleMean( __Ensemble )
550     else:
551         __Em = numpy.ravel( __OptMean ).reshape((-1,1))
552     #
553     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
554
555 # ==============================================================================
556 def EnsembleErrorCovariance( __Ensemble, __quick = False ):
557     "Renvoie l'estimation empirique de la covariance d'ensemble"
558     if __quick:
559         # Covariance rapide mais rarement définie positive
560         __Covariance = numpy.cov( __Ensemble )
561     else:
562         # Résultat souvent identique à numpy.cov, mais plus robuste
563         __n, __m = numpy.asarray( __Ensemble ).shape
564         __Anomalies = EnsembleOfAnomalies( __Ensemble )
565         # Estimation empirique
566         __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
567         # Assure la symétrie
568         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
569         # Assure la positivité
570         __epsilon    = mpr*numpy.trace( __Covariance )
571         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
572     #
573     return __Covariance
574
575 # ==============================================================================
576 def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
577     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
578     if hasattr(__Covariance,"assparsematrix"):
579         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
580             # Traitement d'une covariance nulle ou presque
581             return __Ensemble
582         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
583             # Traitement d'une covariance nulle ou presque
584             return __Ensemble
585     else:
586         if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
587             # Traitement d'une covariance nulle ou presque
588             return __Ensemble
589         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
590             # Traitement d'une covariance nulle ou presque
591             return __Ensemble
592     #
593     __n, __m = __Ensemble.shape
594     if __Seed is not None: numpy.random.seed(__Seed)
595     #
596     if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
597         # Traitement d'une covariance multiple de l'identité
598         __zero = 0.
599         __std  = numpy.sqrt(__Covariance.assparsematrix())
600         __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
601     #
602     elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
603         # Traitement d'une covariance diagonale avec variances non identiques
604         __zero = numpy.zeros(__n)
605         __std  = numpy.sqrt(__Covariance.assparsematrix())
606         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
607     #
608     elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
609         # Traitement d'une covariance pleine
610         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
611     #
612     elif isinstance(__Covariance, numpy.ndarray):
613         # Traitement d'une covariance numpy pleine, sachant qu'on arrive ici en dernier
614         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
615     #
616     else:
617         raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
618     #
619     return __Ensemble
620
621 # ==============================================================================
622 def CovarianceInflation(
623         InputCovOrEns,
624         InflationType   = None,
625         InflationFactor = None,
626         BackgroundCov   = None,
627         ):
628     """
629     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
630
631     Synthèse : Hunt 2007, section 2.3.5
632     """
633     if InflationFactor is None:
634         return InputCovOrEns
635     else:
636         InflationFactor = float(InflationFactor)
637     #
638     if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
639         if InflationFactor < 1.:
640             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
641         if InflationFactor < 1.+mpr:
642             return InputCovOrEns
643         OutputCovOrEns = InflationFactor**2 * InputCovOrEns
644     #
645     elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
646         if InflationFactor < 1.:
647             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
648         if InflationFactor < 1.+mpr:
649             return InputCovOrEns
650         InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
651         OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
652             + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
653     #
654     elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
655         if InflationFactor < 0.:
656             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
657         if InflationFactor < mpr:
658             return InputCovOrEns
659         __n, __m = numpy.asarray(InputCovOrEns).shape
660         if __n != __m:
661             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
662         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
663     #
664     elif InflationType == "HybridOnBackgroundCovariance":
665         if InflationFactor < 0.:
666             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
667         if InflationFactor < mpr:
668             return InputCovOrEns
669         __n, __m = numpy.asarray(InputCovOrEns).shape
670         if __n != __m:
671             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
672         if BackgroundCov is None:
673             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
674         if InputCovOrEns.shape != BackgroundCov.shape:
675             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
676         OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
677     #
678     elif InflationType == "Relaxation":
679         raise NotImplementedError("InflationType Relaxation")
680     #
681     else:
682         raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
683     #
684     return OutputCovOrEns
685
686 # ==============================================================================
687 def HessienneEstimation(nb, HaM, HtM, BI, RI):
688     "Estimation de la Hessienne"
689     #
690     HessienneI = []
691     for i in range(int(nb)):
692         _ee    = numpy.zeros((nb,1))
693         _ee[i] = 1.
694         _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
695         HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
696     #
697     A = numpy.linalg.inv(numpy.array( HessienneI ))
698     #
699     if min(A.shape) != max(A.shape):
700         raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
701     if (numpy.diag(A) < 0).any():
702         raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
703     if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
704         try:
705             L = numpy.linalg.cholesky( A )
706         except:
707             raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
708     #
709     return A
710
711 # ==============================================================================
712 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
713     "Estimation des quantiles a posteriori (selfA est modifié)"
714     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
715     #
716     # Traitement des bornes
717     if "StateBoundsForQuantiles" in selfA._parameters:
718         LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
719     elif "Bounds" in selfA._parameters:
720         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
721     else:
722         LBounds = None
723     if LBounds is not None:
724         LBounds = ForceNumericBounds( LBounds )
725     _Xa = numpy.ravel(Xa)
726     #
727     # Échantillonnage des états
728     YfQ  = None
729     EXr  = None
730     for i in range(nbsamples):
731         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
732             dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
733             if LBounds is not None: # "EstimateProjection" par défaut
734                 dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
735                 dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
736             dYr = HtM @ dXr
737             Yr = HXa.reshape((-1,1)) + dYr
738             if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
739         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
740             Xr = numpy.random.multivariate_normal(_Xa,A)
741             if LBounds is not None: # "EstimateProjection" par défaut
742                 Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
743                 Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
744             Yr = Hm( Xr )
745         else:
746             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
747         #
748         if YfQ is None:
749             YfQ = Yr.reshape((-1,1))
750             if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
751         else:
752             YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
753             if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
754     #
755     # Extraction des quantiles
756     YfQ.sort(axis=-1)
757     YQ = None
758     for quantile in selfA._parameters["Quantiles"]:
759         if not (0. <= float(quantile) <= 1.): continue
760         indice = int(nbsamples * float(quantile) - 1./nbsamples)
761         if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
762         else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
763     if YQ is not None: # Liste non vide de quantiles
764         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
765     if selfA._toStore("SampledStateForQuantiles"):
766         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
767     #
768     return 0
769
770 # ==============================================================================
771 def ForceNumericBounds( __Bounds ):
772     "Force les bornes à être des valeurs numériques, sauf si globalement None"
773     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
774     if __Bounds is None: return None
775     # Converti toutes les bornes individuelles None à +/- l'infini
776     __Bounds = numpy.asarray( __Bounds, dtype=float )
777     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
778         raise ValueError("Incorrectly shaped bounds data")
779     __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
780     __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
781     return __Bounds
782
783 # ==============================================================================
784 def RecentredBounds( __Bounds, __Center):
785     "Recentre les bornes autour de 0, sauf si globalement None"
786     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
787     if __Bounds is None: return None
788     # Recentre les valeurs numériques de bornes
789     return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1))
790
791 # ==============================================================================
792 def ApplyBounds( __Vector, __Bounds, __newClip = True):
793     "Applique des bornes numériques à un point"
794     # Conserve une valeur par défaut s'il n'y a pas de bornes
795     if __Bounds is None: return __Vector
796     #
797     if not isinstance(__Vector, numpy.ndarray): # Is an array
798         raise ValueError("Incorrect array definition of vector data")
799     if not isinstance(__Bounds, numpy.ndarray): # Is an array
800         raise ValueError("Incorrect array definition of bounds data")
801     if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght
802         raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
803     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
804         raise ValueError("Incorrectly shaped bounds data")
805     #
806     if __newClip:
807         __Vector = __Vector.clip(
808             __Bounds[:,0].reshape(__Vector.shape),
809             __Bounds[:,1].reshape(__Vector.shape),
810             )
811     else:
812         __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
813         __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
814         __Vector = numpy.asarray(__Vector)
815     #
816     return __Vector
817
818 # ==============================================================================
819 def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
820     "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
821     #
822     Xf = EnsembleMean( __EnXf )
823     Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
824     Pf = (1 - __Betaf) * __B + __Betaf * Pf
825     #
826     selfB = PartialAlgorithm("3DVAR")
827     selfB._parameters["Minimizer"] = "LBFGSB"
828     selfB._parameters["MaximumNumberOfSteps"] = 15000
829     selfB._parameters["CostDecrementTolerance"] = 1.e-7
830     selfB._parameters["ProjectedGradientTolerance"] = -1
831     selfB._parameters["GradientNormTolerance"] = 1.e-05
832     selfB._parameters["StoreInternalVariables"] = False
833     selfB._parameters["optiprint"] = -1
834     selfB._parameters["optdisp"] = 0
835     selfB._parameters["Bounds"] = None
836     selfB._parameters["InitializationPoint"] = Xf
837     std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
838     Xa = selfB.get("Analysis")[-1].reshape((-1,1))
839     del selfB
840     #
841     return Xa + EnsembleOfAnomalies( __EnXn )
842
843 # ==============================================================================
844 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
845     """
846     Constrained Unscented Kalman Filter
847     """
848     if selfA._parameters["EstimationOf"] == "Parameters":
849         selfA._parameters["StoreInternalVariables"] = True
850     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
851     #
852     L     = Xb.size
853     Alpha = selfA._parameters["Alpha"]
854     Beta  = selfA._parameters["Beta"]
855     if selfA._parameters["Kappa"] == 0:
856         if selfA._parameters["EstimationOf"] == "State":
857             Kappa = 0
858         elif selfA._parameters["EstimationOf"] == "Parameters":
859             Kappa = 3 - L
860     else:
861         Kappa = selfA._parameters["Kappa"]
862     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
863     Gamma  = math.sqrt( L + Lambda )
864     #
865     Ww = []
866     Ww.append( 0. )
867     for i in range(2*L):
868         Ww.append( 1. / (2.*(L + Lambda)) )
869     #
870     Wm = numpy.array( Ww )
871     Wm[0] = Lambda / (L + Lambda)
872     Wc = numpy.array( Ww )
873     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
874     #
875     # Opérateurs
876     Hm = HO["Direct"].appliedControledFormTo
877     #
878     if selfA._parameters["EstimationOf"] == "State":
879         Mm = EM["Direct"].appliedControledFormTo
880     #
881     if CM is not None and "Tangent" in CM and U is not None:
882         Cm = CM["Tangent"].asMatrix(Xb)
883     else:
884         Cm = None
885     #
886     # Durée d'observation et tailles
887     if hasattr(Y,"stepnumber"):
888         duration = Y.stepnumber()
889         __p = numpy.cumprod(Y.shape())[-1]
890     else:
891         duration = 2
892         __p = numpy.array(Y).size
893     #
894     # Précalcul des inversions de B et R
895     if selfA._parameters["StoreInternalVariables"] \
896         or selfA._toStore("CostFunctionJ") \
897         or selfA._toStore("CostFunctionJb") \
898         or selfA._toStore("CostFunctionJo") \
899         or selfA._toStore("CurrentOptimum") \
900         or selfA._toStore("APosterioriCovariance"):
901         BI = B.getI()
902         RI = R.getI()
903     #
904     __n = Xb.size
905     #
906     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
907         Xn = Xb
908         if hasattr(B,"asfullmatrix"):
909             Pn = B.asfullmatrix(__n)
910         else:
911             Pn = B
912         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
913         selfA.StoredVariables["Analysis"].store( Xb )
914         if selfA._toStore("APosterioriCovariance"):
915             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
916     elif selfA._parameters["nextStep"]:
917         Xn = selfA._getInternalState("Xn")
918         Pn = selfA._getInternalState("Pn")
919     #
920     if selfA._parameters["EstimationOf"] == "Parameters":
921         XaMin            = Xn
922         previousJMinimum = numpy.finfo(float).max
923     #
924     for step in range(duration-1):
925         if hasattr(Y,"store"):
926             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
927         else:
928             Ynpu = numpy.ravel( Y ).reshape((__p,1))
929         #
930         if U is not None:
931             if hasattr(U,"store") and len(U)>1:
932                 Un = numpy.ravel( U[step] ).reshape((-1,1))
933             elif hasattr(U,"store") and len(U)==1:
934                 Un = numpy.ravel( U[0] ).reshape((-1,1))
935             else:
936                 Un = numpy.ravel( U ).reshape((-1,1))
937         else:
938             Un = None
939         #
940         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
941         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
942         nbSpts = 2*Xn.size+1
943         #
944         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
945             for point in range(nbSpts):
946                 Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
947         #
948         XEtnnp = []
949         for point in range(nbSpts):
950             if selfA._parameters["EstimationOf"] == "State":
951                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
952                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
953                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
954                     XEtnnpi = XEtnnpi + Cm @ Un
955                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
956                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
957             elif selfA._parameters["EstimationOf"] == "Parameters":
958                 # --- > Par principe, M = Id, Q = 0
959                 XEtnnpi = Xnp[:,point]
960             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
961         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
962         #
963         Xncm = ( XEtnnp * Wm ).sum(axis=1)
964         #
965         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
966             Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
967         #
968         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
969         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
970         for point in range(nbSpts):
971             Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
972         #
973         if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
974             Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
975         else:
976             Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
977         #
978         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
979         #
980         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
981             for point in range(nbSpts):
982                 Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
983         #
984         Ynnp = []
985         for point in range(nbSpts):
986             if selfA._parameters["EstimationOf"] == "State":
987                 Ynnpi = Hm( (Xnnp[:,point], None) )
988             elif selfA._parameters["EstimationOf"] == "Parameters":
989                 Ynnpi = Hm( (Xnnp[:,point], Un) )
990             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
991         Ynnp = numpy.concatenate( Ynnp, axis=1 )
992         #
993         Yncm = ( Ynnp * Wm ).sum(axis=1)
994         #
995         Pyyn = R
996         Pxyn = 0.
997         for point in range(nbSpts):
998             Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
999             Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
1000         #
1001         _Innovation  = Ynpu - Yncm.reshape((-1,1))
1002         if selfA._parameters["EstimationOf"] == "Parameters":
1003             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1004                 _Innovation = _Innovation - Cm @ Un
1005         #
1006         Kn = Pxyn * Pyyn.I
1007         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
1008         Pn = Pnm - Kn * Pyyn * Kn.T
1009         #
1010         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1011             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1012         #
1013         Xa = Xn # Pointeurs
1014         #--------------------------
1015         selfA._setInternalState("Xn", Xn)
1016         selfA._setInternalState("Pn", Pn)
1017         #--------------------------
1018         #
1019         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1020         # ---> avec analysis
1021         selfA.StoredVariables["Analysis"].store( Xa )
1022         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1023             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
1024         if selfA._toStore("InnovationAtCurrentAnalysis"):
1025             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1026         # ---> avec current state
1027         if selfA._parameters["StoreInternalVariables"] \
1028             or selfA._toStore("CurrentState"):
1029             selfA.StoredVariables["CurrentState"].store( Xn )
1030         if selfA._toStore("ForecastState"):
1031             selfA.StoredVariables["ForecastState"].store( Xncm )
1032         if selfA._toStore("ForecastCovariance"):
1033             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
1034         if selfA._toStore("BMA"):
1035             selfA.StoredVariables["BMA"].store( Xncm - Xa )
1036         if selfA._toStore("InnovationAtCurrentState"):
1037             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1038         if selfA._toStore("SimulatedObservationAtCurrentState") \
1039             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1040             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
1041         # ---> autres
1042         if selfA._parameters["StoreInternalVariables"] \
1043             or selfA._toStore("CostFunctionJ") \
1044             or selfA._toStore("CostFunctionJb") \
1045             or selfA._toStore("CostFunctionJo") \
1046             or selfA._toStore("CurrentOptimum") \
1047             or selfA._toStore("APosterioriCovariance"):
1048             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1049             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1050             J   = Jb + Jo
1051             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1052             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1053             selfA.StoredVariables["CostFunctionJ" ].store( J )
1054             #
1055             if selfA._toStore("IndexOfOptimum") \
1056                 or selfA._toStore("CurrentOptimum") \
1057                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1058                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1059                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1060                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1061                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1062             if selfA._toStore("IndexOfOptimum"):
1063                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1064             if selfA._toStore("CurrentOptimum"):
1065                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1066             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1067                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1068             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1069                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1070             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1071                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1072             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1073                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1074         if selfA._toStore("APosterioriCovariance"):
1075             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1076         if selfA._parameters["EstimationOf"] == "Parameters" \
1077             and J < previousJMinimum:
1078             previousJMinimum    = J
1079             XaMin               = Xa
1080             if selfA._toStore("APosterioriCovariance"):
1081                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1082     #
1083     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1084     # ----------------------------------------------------------------------
1085     if selfA._parameters["EstimationOf"] == "Parameters":
1086         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1087         selfA.StoredVariables["Analysis"].store( XaMin )
1088         if selfA._toStore("APosterioriCovariance"):
1089             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1090         if selfA._toStore("BMA"):
1091             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1092     #
1093     return 0
1094
1095 # ==============================================================================
1096 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1097     """
1098     Contrained Extended Kalman Filter
1099     """
1100     if selfA._parameters["EstimationOf"] == "Parameters":
1101         selfA._parameters["StoreInternalVariables"] = True
1102     selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
1103     #
1104     # Opérateurs
1105     H = HO["Direct"].appliedControledFormTo
1106     #
1107     if selfA._parameters["EstimationOf"] == "State":
1108         M = EM["Direct"].appliedControledFormTo
1109     #
1110     if CM is not None and "Tangent" in CM and U is not None:
1111         Cm = CM["Tangent"].asMatrix(Xb)
1112     else:
1113         Cm = None
1114     #
1115     # Durée d'observation et tailles
1116     if hasattr(Y,"stepnumber"):
1117         duration = Y.stepnumber()
1118         __p = numpy.cumprod(Y.shape())[-1]
1119     else:
1120         duration = 2
1121         __p = numpy.array(Y).size
1122     #
1123     # Précalcul des inversions de B et R
1124     if selfA._parameters["StoreInternalVariables"] \
1125         or selfA._toStore("CostFunctionJ") \
1126         or selfA._toStore("CostFunctionJb") \
1127         or selfA._toStore("CostFunctionJo") \
1128         or selfA._toStore("CurrentOptimum") \
1129         or selfA._toStore("APosterioriCovariance"):
1130         BI = B.getI()
1131         RI = R.getI()
1132     #
1133     __n = Xb.size
1134     #
1135     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1136         Xn = Xb
1137         Pn = B
1138         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1139         selfA.StoredVariables["Analysis"].store( Xb )
1140         if selfA._toStore("APosterioriCovariance"):
1141             if hasattr(B,"asfullmatrix"):
1142                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1143             else:
1144                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1145         selfA._setInternalState("seed", numpy.random.get_state())
1146     elif selfA._parameters["nextStep"]:
1147         Xn = selfA._getInternalState("Xn")
1148         Pn = selfA._getInternalState("Pn")
1149     #
1150     if selfA._parameters["EstimationOf"] == "Parameters":
1151         XaMin            = Xn
1152         previousJMinimum = numpy.finfo(float).max
1153     #
1154     for step in range(duration-1):
1155         if hasattr(Y,"store"):
1156             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1157         else:
1158             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1159         #
1160         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1161         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1162         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1163         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1164         #
1165         if selfA._parameters["EstimationOf"] == "State":
1166             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1167             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1168             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1169             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1170         #
1171         if U is not None:
1172             if hasattr(U,"store") and len(U)>1:
1173                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1174             elif hasattr(U,"store") and len(U)==1:
1175                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1176             else:
1177                 Un = numpy.ravel( U ).reshape((-1,1))
1178         else:
1179             Un = None
1180         #
1181         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1182             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1183         #
1184         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1185             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1186             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1187                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1188                 Xn_predicted = Xn_predicted + Cm @ Un
1189             Pn_predicted = Q + Mt * (Pn * Ma)
1190         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1191             # --- > Par principe, M = Id, Q = 0
1192             Xn_predicted = Xn
1193             Pn_predicted = Pn
1194         #
1195         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1196             Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
1197         #
1198         if selfA._parameters["EstimationOf"] == "State":
1199             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1200             _Innovation  = Ynpu - HX_predicted
1201         elif selfA._parameters["EstimationOf"] == "Parameters":
1202             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1203             _Innovation  = Ynpu - HX_predicted
1204             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1205                 _Innovation = _Innovation - Cm @ Un
1206         #
1207         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1208         Xn = Xn_predicted + Kn * _Innovation
1209         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1210         #
1211         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
1212             Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
1213         #
1214         Xa = Xn # Pointeurs
1215         #--------------------------
1216         selfA._setInternalState("Xn", Xn)
1217         selfA._setInternalState("Pn", Pn)
1218         #--------------------------
1219         #
1220         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1221         # ---> avec analysis
1222         selfA.StoredVariables["Analysis"].store( Xa )
1223         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1224             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1225         if selfA._toStore("InnovationAtCurrentAnalysis"):
1226             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1227         # ---> avec current state
1228         if selfA._parameters["StoreInternalVariables"] \
1229             or selfA._toStore("CurrentState"):
1230             selfA.StoredVariables["CurrentState"].store( Xn )
1231         if selfA._toStore("ForecastState"):
1232             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1233         if selfA._toStore("ForecastCovariance"):
1234             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1235         if selfA._toStore("BMA"):
1236             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1237         if selfA._toStore("InnovationAtCurrentState"):
1238             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1239         if selfA._toStore("SimulatedObservationAtCurrentState") \
1240             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1241             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1242         # ---> autres
1243         if selfA._parameters["StoreInternalVariables"] \
1244             or selfA._toStore("CostFunctionJ") \
1245             or selfA._toStore("CostFunctionJb") \
1246             or selfA._toStore("CostFunctionJo") \
1247             or selfA._toStore("CurrentOptimum") \
1248             or selfA._toStore("APosterioriCovariance"):
1249             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1250             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1251             J   = Jb + Jo
1252             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1253             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1254             selfA.StoredVariables["CostFunctionJ" ].store( J )
1255             #
1256             if selfA._toStore("IndexOfOptimum") \
1257                 or selfA._toStore("CurrentOptimum") \
1258                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1259                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1260                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1261                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1262                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1263             if selfA._toStore("IndexOfOptimum"):
1264                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1265             if selfA._toStore("CurrentOptimum"):
1266                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1267             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1268                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1269             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1270                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1271             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1272                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1273             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1274                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1275         if selfA._toStore("APosterioriCovariance"):
1276             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1277         if selfA._parameters["EstimationOf"] == "Parameters" \
1278             and J < previousJMinimum:
1279             previousJMinimum    = J
1280             XaMin               = Xa
1281             if selfA._toStore("APosterioriCovariance"):
1282                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1283     #
1284     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1285     # ----------------------------------------------------------------------
1286     if selfA._parameters["EstimationOf"] == "Parameters":
1287         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1288         selfA.StoredVariables["Analysis"].store( XaMin )
1289         if selfA._toStore("APosterioriCovariance"):
1290             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1291         if selfA._toStore("BMA"):
1292             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1293     #
1294     return 0
1295
1296 # ==============================================================================
1297 def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
1298     """
1299     EnKS
1300     """
1301     #
1302     # Opérateurs
1303     H = HO["Direct"].appliedControledFormTo
1304     #
1305     if selfA._parameters["EstimationOf"] == "State":
1306         M = EM["Direct"].appliedControledFormTo
1307     #
1308     if CM is not None and "Tangent" in CM and U is not None:
1309         Cm = CM["Tangent"].asMatrix(Xb)
1310     else:
1311         Cm = None
1312     #
1313     # Précalcul des inversions de B et R
1314     RIdemi = R.sqrtmI()
1315     #
1316     # Durée d'observation et tailles
1317     LagL = selfA._parameters["SmootherLagL"]
1318     if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
1319         raise ValueError("Fixed-lag smoother requires a series of observation")
1320     if Y.stepnumber() < LagL:
1321         raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
1322     duration = Y.stepnumber()
1323     __p = numpy.cumprod(Y.shape())[-1]
1324     __n = Xb.size
1325     __m = selfA._parameters["NumberOfMembers"]
1326     #
1327     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1328         selfA.StoredVariables["Analysis"].store( Xb )
1329         if selfA._toStore("APosterioriCovariance"):
1330             if hasattr(B,"asfullmatrix"):
1331                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1332             else:
1333                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1334     #
1335     # Calcul direct initial (on privilégie la mémorisation au recalcul)
1336     __seed = numpy.random.get_state()
1337     selfB = copy.deepcopy(selfA)
1338     selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
1339     if VariantM == "EnKS16-KalmanFilterFormula":
1340         etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
1341     else:
1342         raise ValueError("VariantM has to be chosen in the authorized methods list.")
1343     if LagL > 0:
1344         EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
1345     else:
1346         EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
1347     selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
1348     #
1349     for step in range(LagL,duration-1):
1350         #
1351         sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
1352         sEL.append(None)
1353         #
1354         if hasattr(Y,"store"):
1355             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1356         else:
1357             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1358         #
1359         if U is not None:
1360             if hasattr(U,"store") and len(U)>1:
1361                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1362             elif hasattr(U,"store") and len(U)==1:
1363                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1364             else:
1365                 Un = numpy.ravel( U ).reshape((-1,1))
1366         else:
1367             Un = None
1368         #
1369         #--------------------------
1370         if VariantM == "EnKS16-KalmanFilterFormula":
1371             if selfA._parameters["EstimationOf"] == "State": # Forecast
1372                 EL = M( [(EL[:,i], Un) for i in range(__m)],
1373                     argsAsSerie = True,
1374                     returnSerieAsArrayMatrix = True )
1375                 EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
1376                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1377                     argsAsSerie = True,
1378                     returnSerieAsArrayMatrix = True )
1379                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1380                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1381                     EZ = EZ + Cm @ Un
1382             elif selfA._parameters["EstimationOf"] == "Parameters":
1383                 # --- > Par principe, M = Id, Q = 0
1384                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
1385                     argsAsSerie = True,
1386                     returnSerieAsArrayMatrix = True )
1387             #
1388             vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1389             vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
1390             #
1391             mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
1392             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1393             delta = RIdemi @ ( Ynpu - vZm )
1394             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1395             vw    = mT @ mS.T @ delta
1396             #
1397             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1398             mU    = numpy.identity(__m)
1399             wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
1400             #
1401             EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
1402             EL    = vEm + EX @ wTU
1403             #
1404             sEL[LagL] = EL
1405             for irl in range(LagL): # Lissage des L précédentes analysis
1406                 vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1407                 EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
1408                 sEL[irl] = vEm + EX @ wTU
1409             #
1410             # Conservation de l'analyse retrospective d'ordre 0 avant rotation
1411             Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1412             if selfA._toStore("APosterioriCovariance"):
1413                 EXn = sEL[0]
1414             #
1415             for irl in range(LagL):
1416                 sEL[irl] = sEL[irl+1]
1417             sEL[LagL] = None
1418         #--------------------------
1419         else:
1420             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1421         #
1422         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1423         # ---> avec analysis
1424         selfA.StoredVariables["Analysis"].store( Xa )
1425         if selfA._toStore("APosterioriCovariance"):
1426             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
1427     #
1428     # Stockage des dernières analyses incomplètement remises à jour
1429     for irl in range(LagL):
1430         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1431         Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
1432         selfA.StoredVariables["Analysis"].store( Xa )
1433     #
1434     return 0
1435
1436 # ==============================================================================
1437 def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
1438     VariantM="KalmanFilterFormula",
1439     Hybrid=None,
1440     ):
1441     """
1442     Ensemble-Transform EnKF
1443     """
1444     if selfA._parameters["EstimationOf"] == "Parameters":
1445         selfA._parameters["StoreInternalVariables"] = True
1446     #
1447     # Opérateurs
1448     H = HO["Direct"].appliedControledFormTo
1449     #
1450     if selfA._parameters["EstimationOf"] == "State":
1451         M = EM["Direct"].appliedControledFormTo
1452     #
1453     if CM is not None and "Tangent" in CM and U is not None:
1454         Cm = CM["Tangent"].asMatrix(Xb)
1455     else:
1456         Cm = None
1457     #
1458     # Durée d'observation et tailles
1459     if hasattr(Y,"stepnumber"):
1460         duration = Y.stepnumber()
1461         __p = numpy.cumprod(Y.shape())[-1]
1462     else:
1463         duration = 2
1464         __p = numpy.array(Y).size
1465     #
1466     # Précalcul des inversions de B et R
1467     if selfA._parameters["StoreInternalVariables"] \
1468         or selfA._toStore("CostFunctionJ") \
1469         or selfA._toStore("CostFunctionJb") \
1470         or selfA._toStore("CostFunctionJo") \
1471         or selfA._toStore("CurrentOptimum") \
1472         or selfA._toStore("APosterioriCovariance"):
1473         BI = B.getI()
1474         RI = R.getI()
1475     elif VariantM != "KalmanFilterFormula":
1476         RI = R.getI()
1477     if VariantM == "KalmanFilterFormula":
1478         RIdemi = R.sqrtmI()
1479     #
1480     __n = Xb.size
1481     __m = selfA._parameters["NumberOfMembers"]
1482     #
1483     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1484         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
1485         selfA.StoredVariables["Analysis"].store( Xb )
1486         if selfA._toStore("APosterioriCovariance"):
1487             if hasattr(B,"asfullmatrix"):
1488                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1489             else:
1490                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1491         selfA._setInternalState("seed", numpy.random.get_state())
1492     elif selfA._parameters["nextStep"]:
1493         Xn = selfA._getInternalState("Xn")
1494     #
1495     previousJMinimum = numpy.finfo(float).max
1496     #
1497     for step in range(duration-1):
1498         numpy.random.set_state(selfA._getInternalState("seed"))
1499         if hasattr(Y,"store"):
1500             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1501         else:
1502             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1503         #
1504         if U is not None:
1505             if hasattr(U,"store") and len(U)>1:
1506                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1507             elif hasattr(U,"store") and len(U)==1:
1508                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1509             else:
1510                 Un = numpy.ravel( U ).reshape((-1,1))
1511         else:
1512             Un = None
1513         #
1514         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
1515             Xn = CovarianceInflation( Xn,
1516                 selfA._parameters["InflationType"],
1517                 selfA._parameters["InflationFactor"],
1518                 )
1519         #
1520         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1521             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
1522                 argsAsSerie = True,
1523                 returnSerieAsArrayMatrix = True )
1524             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
1525             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1526                 argsAsSerie = True,
1527                 returnSerieAsArrayMatrix = True )
1528             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1529                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1530                 Xn_predicted = Xn_predicted + Cm @ Un
1531         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1532             # --- > Par principe, M = Id, Q = 0
1533             Xn_predicted = EMX = Xn
1534             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
1535                 argsAsSerie = True,
1536                 returnSerieAsArrayMatrix = True )
1537         #
1538         # Mean of forecast and observation of forecast
1539         Xfm  = EnsembleMean( Xn_predicted )
1540         Hfm  = EnsembleMean( HX_predicted )
1541         #
1542         # Anomalies
1543         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
1544         EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
1545         #
1546         #--------------------------
1547         if VariantM == "KalmanFilterFormula":
1548             mS    = RIdemi * EaHX / math.sqrt(__m-1)
1549             mS    = mS.reshape((-1,__m)) # Pour dimension 1
1550             delta = RIdemi * ( Ynpu - Hfm )
1551             mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
1552             vw    = mT @ mS.T @ delta
1553             #
1554             Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
1555             mU    = numpy.identity(__m)
1556             #
1557             EaX   = EaX / math.sqrt(__m-1)
1558             Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
1559         #--------------------------
1560         elif VariantM == "Variational":
1561             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1562             def CostFunction(w):
1563                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1564                 _Jo = 0.5 * _A.T @ (RI * _A)
1565                 _Jb = 0.5 * (__m-1) * w.T @ w
1566                 _J  = _Jo + _Jb
1567                 return float(_J)
1568             def GradientOfCostFunction(w):
1569                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1570                 _GardJo = - EaHX.T @ (RI * _A)
1571                 _GradJb = (__m-1) * w.reshape((__m,1))
1572                 _GradJ  = _GardJo + _GradJb
1573                 return numpy.ravel(_GradJ)
1574             vw = scipy.optimize.fmin_cg(
1575                 f           = CostFunction,
1576                 x0          = numpy.zeros(__m),
1577                 fprime      = GradientOfCostFunction,
1578                 args        = (),
1579                 disp        = False,
1580                 )
1581             #
1582             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1583             Htb = (__m-1) * numpy.identity(__m)
1584             Hta = Hto + Htb
1585             #
1586             Pta = numpy.linalg.inv( Hta )
1587             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1588             #
1589             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1590         #--------------------------
1591         elif VariantM == "FiniteSize11": # Jauge Boc2011
1592             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1593             def CostFunction(w):
1594                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1595                 _Jo = 0.5 * _A.T @ (RI * _A)
1596                 _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
1597                 _J  = _Jo + _Jb
1598                 return float(_J)
1599             def GradientOfCostFunction(w):
1600                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1601                 _GardJo = - EaHX.T @ (RI * _A)
1602                 _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1603                 _GradJ  = _GardJo + _GradJb
1604                 return numpy.ravel(_GradJ)
1605             vw = scipy.optimize.fmin_cg(
1606                 f           = CostFunction,
1607                 x0          = numpy.zeros(__m),
1608                 fprime      = GradientOfCostFunction,
1609                 args        = (),
1610                 disp        = False,
1611                 )
1612             #
1613             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1614             Htb = __m * \
1615                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1616                 / (1 + 1/__m + vw.T @ vw)**2
1617             Hta = Hto + Htb
1618             #
1619             Pta = numpy.linalg.inv( Hta )
1620             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1621             #
1622             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1623         #--------------------------
1624         elif VariantM == "FiniteSize15": # Jauge Boc2015
1625             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1626             def CostFunction(w):
1627                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1628                 _Jo = 0.5 * _A.T * (RI * _A)
1629                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
1630                 _J  = _Jo + _Jb
1631                 return float(_J)
1632             def GradientOfCostFunction(w):
1633                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1634                 _GardJo = - EaHX.T @ (RI * _A)
1635                 _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
1636                 _GradJ  = _GardJo + _GradJb
1637                 return numpy.ravel(_GradJ)
1638             vw = scipy.optimize.fmin_cg(
1639                 f           = CostFunction,
1640                 x0          = numpy.zeros(__m),
1641                 fprime      = GradientOfCostFunction,
1642                 args        = (),
1643                 disp        = False,
1644                 )
1645             #
1646             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1647             Htb = (__m+1) * \
1648                 ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
1649                 / (1 + 1/__m + vw.T @ vw)**2
1650             Hta = Hto + Htb
1651             #
1652             Pta = numpy.linalg.inv( Hta )
1653             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1654             #
1655             Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
1656         #--------------------------
1657         elif VariantM == "FiniteSize16": # Jauge Boc2016
1658             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
1659             def CostFunction(w):
1660                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1661                 _Jo = 0.5 * _A.T @ (RI * _A)
1662                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
1663                 _J  = _Jo + _Jb
1664                 return float(_J)
1665             def GradientOfCostFunction(w):
1666                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
1667                 _GardJo = - EaHX.T @ (RI * _A)
1668                 _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
1669                 _GradJ  = _GardJo + _GradJb
1670                 return numpy.ravel(_GradJ)
1671             vw = scipy.optimize.fmin_cg(
1672                 f           = CostFunction,
1673                 x0          = numpy.zeros(__m),
1674                 fprime      = GradientOfCostFunction,
1675                 args        = (),
1676                 disp        = False,
1677                 )
1678             #
1679             Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
1680             Htb = ((__m+1) / (__m-1)) * \
1681                 ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
1682                 / (1 + 1/__m + vw.T @ vw / (__m-1))**2
1683             Hta = Hto + Htb
1684             #
1685             Pta = numpy.linalg.inv( Hta )
1686             EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Partie imaginaire ~= 10^-18
1687             #
1688             Xn  = Xfm + EaX @ (vw[:,None] + EWa)
1689         #--------------------------
1690         else:
1691             raise ValueError("VariantM has to be chosen in the authorized methods list.")
1692         #
1693         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
1694             Xn = CovarianceInflation( Xn,
1695                 selfA._parameters["InflationType"],
1696                 selfA._parameters["InflationFactor"],
1697                 )
1698         #
1699         if Hybrid == "E3DVAR":
1700             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
1701             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
1702         #
1703         Xa = EnsembleMean( Xn )
1704         #--------------------------
1705         selfA._setInternalState("Xn", Xn)
1706         selfA._setInternalState("seed", numpy.random.get_state())
1707         #--------------------------
1708         #
1709         if selfA._parameters["StoreInternalVariables"] \
1710             or selfA._toStore("CostFunctionJ") \
1711             or selfA._toStore("CostFunctionJb") \
1712             or selfA._toStore("CostFunctionJo") \
1713             or selfA._toStore("APosterioriCovariance") \
1714             or selfA._toStore("InnovationAtCurrentAnalysis") \
1715             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
1716             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1717             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
1718             _Innovation = Ynpu - _HXa
1719         #
1720         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1721         # ---> avec analysis
1722         selfA.StoredVariables["Analysis"].store( Xa )
1723         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1724             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
1725         if selfA._toStore("InnovationAtCurrentAnalysis"):
1726             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1727         # ---> avec current state
1728         if selfA._parameters["StoreInternalVariables"] \
1729             or selfA._toStore("CurrentState"):
1730             selfA.StoredVariables["CurrentState"].store( Xn )
1731         if selfA._toStore("ForecastState"):
1732             selfA.StoredVariables["ForecastState"].store( EMX )
1733         if selfA._toStore("ForecastCovariance"):
1734             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
1735         if selfA._toStore("BMA"):
1736             selfA.StoredVariables["BMA"].store( EMX - Xa )
1737         if selfA._toStore("InnovationAtCurrentState"):
1738             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
1739         if selfA._toStore("SimulatedObservationAtCurrentState") \
1740             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1741             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1742         # ---> autres
1743         if selfA._parameters["StoreInternalVariables"] \
1744             or selfA._toStore("CostFunctionJ") \
1745             or selfA._toStore("CostFunctionJb") \
1746             or selfA._toStore("CostFunctionJo") \
1747             or selfA._toStore("CurrentOptimum") \
1748             or selfA._toStore("APosterioriCovariance"):
1749             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
1750             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
1751             J   = Jb + Jo
1752             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1753             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1754             selfA.StoredVariables["CostFunctionJ" ].store( J )
1755             #
1756             if selfA._toStore("IndexOfOptimum") \
1757                 or selfA._toStore("CurrentOptimum") \
1758                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1759                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1760                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1761                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1762                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1763             if selfA._toStore("IndexOfOptimum"):
1764                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1765             if selfA._toStore("CurrentOptimum"):
1766                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1767             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1768                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1769             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1770                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1771             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1772                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1773             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1774                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1775         if selfA._toStore("APosterioriCovariance"):
1776             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
1777         if selfA._parameters["EstimationOf"] == "Parameters" \
1778             and J < previousJMinimum:
1779             previousJMinimum    = J
1780             XaMin               = Xa
1781             if selfA._toStore("APosterioriCovariance"):
1782                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1783         # ---> Pour les smoothers
1784         if selfA._toStore("CurrentEnsembleState"):
1785             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
1786     #
1787     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1788     # ----------------------------------------------------------------------
1789     if selfA._parameters["EstimationOf"] == "Parameters":
1790         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1791         selfA.StoredVariables["Analysis"].store( XaMin )
1792         if selfA._toStore("APosterioriCovariance"):
1793             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1794         if selfA._toStore("BMA"):
1795             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1796     #
1797     return 0
1798
1799 # ==============================================================================
1800 def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
1801     """
1802     Extended Kalman Filter
1803     """
1804     if selfA._parameters["EstimationOf"] == "Parameters":
1805         selfA._parameters["StoreInternalVariables"] = True
1806     #
1807     # Opérateurs
1808     H = HO["Direct"].appliedControledFormTo
1809     #
1810     if selfA._parameters["EstimationOf"] == "State":
1811         M = EM["Direct"].appliedControledFormTo
1812     #
1813     if CM is not None and "Tangent" in CM and U is not None:
1814         Cm = CM["Tangent"].asMatrix(Xb)
1815     else:
1816         Cm = None
1817     #
1818     # Durée d'observation et tailles
1819     if hasattr(Y,"stepnumber"):
1820         duration = Y.stepnumber()
1821         __p = numpy.cumprod(Y.shape())[-1]
1822     else:
1823         duration = 2
1824         __p = numpy.array(Y).size
1825     #
1826     # Précalcul des inversions de B et R
1827     if selfA._parameters["StoreInternalVariables"] \
1828         or selfA._toStore("CostFunctionJ") \
1829         or selfA._toStore("CostFunctionJb") \
1830         or selfA._toStore("CostFunctionJo") \
1831         or selfA._toStore("CurrentOptimum") \
1832         or selfA._toStore("APosterioriCovariance"):
1833         BI = B.getI()
1834         RI = R.getI()
1835     #
1836     __n = Xb.size
1837     #
1838     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
1839         Xn = Xb
1840         Pn = B
1841         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1842         selfA.StoredVariables["Analysis"].store( Xb )
1843         if selfA._toStore("APosterioriCovariance"):
1844             if hasattr(B,"asfullmatrix"):
1845                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
1846             else:
1847                 selfA.StoredVariables["APosterioriCovariance"].store( B )
1848         selfA._setInternalState("seed", numpy.random.get_state())
1849     elif selfA._parameters["nextStep"]:
1850         Xn = selfA._getInternalState("Xn")
1851         Pn = selfA._getInternalState("Pn")
1852     #
1853     if selfA._parameters["EstimationOf"] == "Parameters":
1854         XaMin            = Xn
1855         previousJMinimum = numpy.finfo(float).max
1856     #
1857     for step in range(duration-1):
1858         if hasattr(Y,"store"):
1859             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
1860         else:
1861             Ynpu = numpy.ravel( Y ).reshape((__p,1))
1862         #
1863         Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
1864         Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
1865         Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1866         Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
1867         #
1868         if selfA._parameters["EstimationOf"] == "State":
1869             Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
1870             Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
1871             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
1872             Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
1873         #
1874         if U is not None:
1875             if hasattr(U,"store") and len(U)>1:
1876                 Un = numpy.ravel( U[step] ).reshape((-1,1))
1877             elif hasattr(U,"store") and len(U)==1:
1878                 Un = numpy.ravel( U[0] ).reshape((-1,1))
1879             else:
1880                 Un = numpy.ravel( U ).reshape((-1,1))
1881         else:
1882             Un = None
1883         #
1884         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
1885             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
1886             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
1887                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
1888                 Xn_predicted = Xn_predicted + Cm @ Un
1889             Pn_predicted = Q + Mt * (Pn * Ma)
1890         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
1891             # --- > Par principe, M = Id, Q = 0
1892             Xn_predicted = Xn
1893             Pn_predicted = Pn
1894         #
1895         if selfA._parameters["EstimationOf"] == "State":
1896             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
1897             _Innovation  = Ynpu - HX_predicted
1898         elif selfA._parameters["EstimationOf"] == "Parameters":
1899             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
1900             _Innovation  = Ynpu - HX_predicted
1901             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
1902                 _Innovation = _Innovation - Cm @ Un
1903         #
1904         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
1905         Xn = Xn_predicted + Kn * _Innovation
1906         Pn = Pn_predicted - Kn * Ht * Pn_predicted
1907         #
1908         Xa = Xn # Pointeurs
1909         #--------------------------
1910         selfA._setInternalState("Xn", Xn)
1911         selfA._setInternalState("Pn", Pn)
1912         #--------------------------
1913         #
1914         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1915         # ---> avec analysis
1916         selfA.StoredVariables["Analysis"].store( Xa )
1917         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
1918             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
1919         if selfA._toStore("InnovationAtCurrentAnalysis"):
1920             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
1921         # ---> avec current state
1922         if selfA._parameters["StoreInternalVariables"] \
1923             or selfA._toStore("CurrentState"):
1924             selfA.StoredVariables["CurrentState"].store( Xn )
1925         if selfA._toStore("ForecastState"):
1926             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
1927         if selfA._toStore("ForecastCovariance"):
1928             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
1929         if selfA._toStore("BMA"):
1930             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
1931         if selfA._toStore("InnovationAtCurrentState"):
1932             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
1933         if selfA._toStore("SimulatedObservationAtCurrentState") \
1934             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1935             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
1936         # ---> autres
1937         if selfA._parameters["StoreInternalVariables"] \
1938             or selfA._toStore("CostFunctionJ") \
1939             or selfA._toStore("CostFunctionJb") \
1940             or selfA._toStore("CostFunctionJo") \
1941             or selfA._toStore("CurrentOptimum") \
1942             or selfA._toStore("APosterioriCovariance"):
1943             Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
1944             Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
1945             J   = Jb + Jo
1946             selfA.StoredVariables["CostFunctionJb"].store( Jb )
1947             selfA.StoredVariables["CostFunctionJo"].store( Jo )
1948             selfA.StoredVariables["CostFunctionJ" ].store( J )
1949             #
1950             if selfA._toStore("IndexOfOptimum") \
1951                 or selfA._toStore("CurrentOptimum") \
1952                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
1953                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
1954                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
1955                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1956                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
1957             if selfA._toStore("IndexOfOptimum"):
1958                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
1959             if selfA._toStore("CurrentOptimum"):
1960                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
1961             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
1962                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
1963             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
1964                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
1965             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
1966                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
1967             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
1968                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
1969         if selfA._toStore("APosterioriCovariance"):
1970             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
1971         if selfA._parameters["EstimationOf"] == "Parameters" \
1972             and J < previousJMinimum:
1973             previousJMinimum    = J
1974             XaMin               = Xa
1975             if selfA._toStore("APosterioriCovariance"):
1976                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
1977     #
1978     # Stockage final supplémentaire de l'optimum en estimation de paramètres
1979     # ----------------------------------------------------------------------
1980     if selfA._parameters["EstimationOf"] == "Parameters":
1981         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
1982         selfA.StoredVariables["Analysis"].store( XaMin )
1983         if selfA._toStore("APosterioriCovariance"):
1984             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
1985         if selfA._toStore("BMA"):
1986             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
1987     #
1988     return 0
1989
1990 # ==============================================================================
1991 def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
1992     BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
1993     """
1994     Iterative EnKF
1995     """
1996     if selfA._parameters["EstimationOf"] == "Parameters":
1997         selfA._parameters["StoreInternalVariables"] = True
1998     #
1999     # Opérateurs
2000     H = HO["Direct"].appliedControledFormTo
2001     #
2002     if selfA._parameters["EstimationOf"] == "State":
2003         M = EM["Direct"].appliedControledFormTo
2004     #
2005     if CM is not None and "Tangent" in CM and U is not None:
2006         Cm = CM["Tangent"].asMatrix(Xb)
2007     else:
2008         Cm = None
2009     #
2010     # Durée d'observation et tailles
2011     if hasattr(Y,"stepnumber"):
2012         duration = Y.stepnumber()
2013         __p = numpy.cumprod(Y.shape())[-1]
2014     else:
2015         duration = 2
2016         __p = numpy.array(Y).size
2017     #
2018     # Précalcul des inversions de B et R
2019     if selfA._parameters["StoreInternalVariables"] \
2020         or selfA._toStore("CostFunctionJ") \
2021         or selfA._toStore("CostFunctionJb") \
2022         or selfA._toStore("CostFunctionJo") \
2023         or selfA._toStore("CurrentOptimum") \
2024         or selfA._toStore("APosterioriCovariance"):
2025         BI = B.getI()
2026     RI = R.getI()
2027     #
2028     __n = Xb.size
2029     __m = selfA._parameters["NumberOfMembers"]
2030     #
2031     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2032         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
2033         else:                         Pn = B
2034         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
2035         selfA.StoredVariables["Analysis"].store( Xb )
2036         if selfA._toStore("APosterioriCovariance"):
2037             if hasattr(B,"asfullmatrix"):
2038                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2039             else:
2040                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2041         selfA._setInternalState("seed", numpy.random.get_state())
2042     elif selfA._parameters["nextStep"]:
2043         Xn = selfA._getInternalState("Xn")
2044     #
2045     previousJMinimum = numpy.finfo(float).max
2046     #
2047     for step in range(duration-1):
2048         numpy.random.set_state(selfA._getInternalState("seed"))
2049         if hasattr(Y,"store"):
2050             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2051         else:
2052             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2053         #
2054         if U is not None:
2055             if hasattr(U,"store") and len(U)>1:
2056                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2057             elif hasattr(U,"store") and len(U)==1:
2058                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2059             else:
2060                 Un = numpy.ravel( U ).reshape((-1,1))
2061         else:
2062             Un = None
2063         #
2064         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2065             Xn = CovarianceInflation( Xn,
2066                 selfA._parameters["InflationType"],
2067                 selfA._parameters["InflationFactor"],
2068                 )
2069         #
2070         #--------------------------
2071         if VariantM == "IEnKF12":
2072             Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
2073             EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
2074             __j = 0
2075             Deltaw = 1
2076             if not BnotT:
2077                 Ta  = numpy.identity(__m)
2078             vw  = numpy.zeros(__m)
2079             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2080                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2081                 #
2082                 if BnotT:
2083                     E1 = vx1 + _epsilon * EaX
2084                 else:
2085                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2086                 #
2087                 if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
2088                     E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2089                         argsAsSerie = True,
2090                         returnSerieAsArrayMatrix = True )
2091                 elif selfA._parameters["EstimationOf"] == "Parameters":
2092                     # --- > Par principe, M = Id
2093                     E2 = Xn
2094                 vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
2095                 vy1 = H((vx2, Un)).reshape((__p,1))
2096                 #
2097                 HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
2098                     argsAsSerie = True,
2099                     returnSerieAsArrayMatrix = True )
2100                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2101                 #
2102                 if BnotT:
2103                     EaY = (HE2 - vy2) / _epsilon
2104                 else:
2105                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2106                 #
2107                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
2108                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2109                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2110                 #
2111                 vw = vw + Deltaw
2112                 #
2113                 if not BnotT:
2114                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2115                 #
2116                 __j = __j + 1
2117             #
2118             A2 = EnsembleOfAnomalies( E2 )
2119             #
2120             if BnotT:
2121                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2122                 A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
2123             #
2124             Xn = vx2 + A2
2125         #--------------------------
2126         else:
2127             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2128         #
2129         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2130             Xn = CovarianceInflation( Xn,
2131                 selfA._parameters["InflationType"],
2132                 selfA._parameters["InflationFactor"],
2133                 )
2134         #
2135         Xa = EnsembleMean( Xn )
2136         #--------------------------
2137         selfA._setInternalState("Xn", Xn)
2138         selfA._setInternalState("seed", numpy.random.get_state())
2139         #--------------------------
2140         #
2141         if selfA._parameters["StoreInternalVariables"] \
2142             or selfA._toStore("CostFunctionJ") \
2143             or selfA._toStore("CostFunctionJb") \
2144             or selfA._toStore("CostFunctionJo") \
2145             or selfA._toStore("APosterioriCovariance") \
2146             or selfA._toStore("InnovationAtCurrentAnalysis") \
2147             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2148             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2149             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2150             _Innovation = Ynpu - _HXa
2151         #
2152         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2153         # ---> avec analysis
2154         selfA.StoredVariables["Analysis"].store( Xa )
2155         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2156             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2157         if selfA._toStore("InnovationAtCurrentAnalysis"):
2158             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2159         # ---> avec current state
2160         if selfA._parameters["StoreInternalVariables"] \
2161             or selfA._toStore("CurrentState"):
2162             selfA.StoredVariables["CurrentState"].store( Xn )
2163         if selfA._toStore("ForecastState"):
2164             selfA.StoredVariables["ForecastState"].store( E2 )
2165         if selfA._toStore("ForecastCovariance"):
2166             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
2167         if selfA._toStore("BMA"):
2168             selfA.StoredVariables["BMA"].store( E2 - Xa )
2169         if selfA._toStore("InnovationAtCurrentState"):
2170             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2171         if selfA._toStore("SimulatedObservationAtCurrentState") \
2172             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2173             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2174         # ---> autres
2175         if selfA._parameters["StoreInternalVariables"] \
2176             or selfA._toStore("CostFunctionJ") \
2177             or selfA._toStore("CostFunctionJb") \
2178             or selfA._toStore("CostFunctionJo") \
2179             or selfA._toStore("CurrentOptimum") \
2180             or selfA._toStore("APosterioriCovariance"):
2181             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2182             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2183             J   = Jb + Jo
2184             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2185             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2186             selfA.StoredVariables["CostFunctionJ" ].store( J )
2187             #
2188             if selfA._toStore("IndexOfOptimum") \
2189                 or selfA._toStore("CurrentOptimum") \
2190                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2191                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2192                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2193                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2194                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2195             if selfA._toStore("IndexOfOptimum"):
2196                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2197             if selfA._toStore("CurrentOptimum"):
2198                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2199             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2200                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2201             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2202                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2203             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2204                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2205             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2206                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2207         if selfA._toStore("APosterioriCovariance"):
2208             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2209         if selfA._parameters["EstimationOf"] == "Parameters" \
2210             and J < previousJMinimum:
2211             previousJMinimum    = J
2212             XaMin               = Xa
2213             if selfA._toStore("APosterioriCovariance"):
2214                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2215         # ---> Pour les smoothers
2216         if selfA._toStore("CurrentEnsembleState"):
2217             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2218     #
2219     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2220     # ----------------------------------------------------------------------
2221     if selfA._parameters["EstimationOf"] == "Parameters":
2222         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2223         selfA.StoredVariables["Analysis"].store( XaMin )
2224         if selfA._toStore("APosterioriCovariance"):
2225             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2226         if selfA._toStore("BMA"):
2227             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2228     #
2229     return 0
2230
2231 # ==============================================================================
2232 def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2233     """
2234     3DVAR incrémental
2235     """
2236     #
2237     # Initialisations
2238     # ---------------
2239     Hm = HO["Direct"].appliedTo
2240     #
2241     BI = B.getI()
2242     RI = R.getI()
2243     #
2244     Xini = selfA._parameters["InitializationPoint"]
2245     #
2246     HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
2247     Innovation = Y - HXb
2248     #
2249     # Outer Loop
2250     # ----------
2251     iOuter = 0
2252     J      = 1./mpr
2253     DeltaJ = 1./mpr
2254     Xr     = Xini.reshape((-1,1))
2255     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
2256         #
2257         # Inner Loop
2258         # ----------
2259         Ht = HO["Tangent"].asMatrix(Xr)
2260         Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
2261         #
2262         # Définition de la fonction-coût
2263         # ------------------------------
2264         def CostFunction(dx):
2265             _dX  = numpy.ravel( dx ).reshape((-1,1))
2266             if selfA._parameters["StoreInternalVariables"] or \
2267                 selfA._toStore("CurrentState") or \
2268                 selfA._toStore("CurrentOptimum"):
2269                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
2270             _HdX = Ht @ _dX
2271             _HdX = numpy.ravel( _HdX ).reshape((-1,1))
2272             _dInnovation = Innovation - _HdX
2273             if selfA._toStore("SimulatedObservationAtCurrentState") or \
2274                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2275                 selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
2276             if selfA._toStore("InnovationAtCurrentState"):
2277                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
2278             #
2279             Jb  = float( 0.5 * _dX.T * (BI * _dX) )
2280             Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
2281             J   = Jb + Jo
2282             #
2283             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2284             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2285             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2286             selfA.StoredVariables["CostFunctionJ" ].store( J )
2287             if selfA._toStore("IndexOfOptimum") or \
2288                 selfA._toStore("CurrentOptimum") or \
2289                 selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2290                 selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2291                 selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2292                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2293                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2294             if selfA._toStore("IndexOfOptimum"):
2295                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2296             if selfA._toStore("CurrentOptimum"):
2297                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2298             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2299                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2300             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2301                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2302             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2303                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2304             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2305                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2306             return J
2307         #
2308         def GradientOfCostFunction(dx):
2309             _dX          = numpy.ravel( dx )
2310             _HdX         = Ht @ _dX
2311             _HdX         = numpy.ravel( _HdX ).reshape((-1,1))
2312             _dInnovation = Innovation - _HdX
2313             GradJb       = BI @ _dX
2314             GradJo       = - Ht.T @ (RI * _dInnovation)
2315             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2316             return GradJ
2317         #
2318         # Minimisation de la fonctionnelle
2319         # --------------------------------
2320         nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2321         #
2322         if selfA._parameters["Minimizer"] == "LBFGSB":
2323             # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
2324             if "0.19" <= scipy.version.version <= "1.1.0":
2325                 import lbfgsbhlt as optimiseur
2326             else:
2327                 import scipy.optimize as optimiseur
2328             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2329                 func        = CostFunction,
2330                 x0          = numpy.zeros(Xini.size),
2331                 fprime      = GradientOfCostFunction,
2332                 args        = (),
2333                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2334                 maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2335                 factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2336                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2337                 iprint      = selfA._parameters["optiprint"],
2338                 )
2339             nfeval = Informations['funcalls']
2340             rc     = Informations['warnflag']
2341         elif selfA._parameters["Minimizer"] == "TNC":
2342             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2343                 func        = CostFunction,
2344                 x0          = numpy.zeros(Xini.size),
2345                 fprime      = GradientOfCostFunction,
2346                 args        = (),
2347                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2348                 maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2349                 pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2350                 ftol        = selfA._parameters["CostDecrementTolerance"],
2351                 messages    = selfA._parameters["optmessages"],
2352                 )
2353         elif selfA._parameters["Minimizer"] == "CG":
2354             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2355                 f           = CostFunction,
2356                 x0          = numpy.zeros(Xini.size),
2357                 fprime      = GradientOfCostFunction,
2358                 args        = (),
2359                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2360                 gtol        = selfA._parameters["GradientNormTolerance"],
2361                 disp        = selfA._parameters["optdisp"],
2362                 full_output = True,
2363                 )
2364         elif selfA._parameters["Minimizer"] == "NCG":
2365             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2366                 f           = CostFunction,
2367                 x0          = numpy.zeros(Xini.size),
2368                 fprime      = GradientOfCostFunction,
2369                 args        = (),
2370                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2371                 avextol     = selfA._parameters["CostDecrementTolerance"],
2372                 disp        = selfA._parameters["optdisp"],
2373                 full_output = True,
2374                 )
2375         elif selfA._parameters["Minimizer"] == "BFGS":
2376             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
2377                 f           = CostFunction,
2378                 x0          = numpy.zeros(Xini.size),
2379                 fprime      = GradientOfCostFunction,
2380                 args        = (),
2381                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2382                 gtol        = selfA._parameters["GradientNormTolerance"],
2383                 disp        = selfA._parameters["optdisp"],
2384                 full_output = True,
2385                 )
2386         else:
2387             raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
2388         #
2389         IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2390         MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
2391         #
2392         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
2393             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
2394         else:
2395             Minimum = Xb + Minimum.reshape((-1,1))
2396         #
2397         Xr     = Minimum
2398         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
2399         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
2400     #
2401     Xa = Xr
2402     #--------------------------
2403     #
2404     selfA.StoredVariables["Analysis"].store( Xa )
2405     #
2406     if selfA._toStore("OMA") or \
2407         selfA._toStore("SigmaObs2") or \
2408         selfA._toStore("SimulationQuantiles") or \
2409         selfA._toStore("SimulatedObservationAtOptimum"):
2410         if selfA._toStore("SimulatedObservationAtCurrentState"):
2411             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
2412         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2413             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
2414         else:
2415             HXa = Hm( Xa )
2416     #
2417     if selfA._toStore("APosterioriCovariance") or \
2418         selfA._toStore("SimulationQuantiles") or \
2419         selfA._toStore("JacobianMatrixAtOptimum") or \
2420         selfA._toStore("KalmanGainAtOptimum"):
2421         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
2422         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
2423     if selfA._toStore("APosterioriCovariance") or \
2424         selfA._toStore("SimulationQuantiles") or \
2425         selfA._toStore("KalmanGainAtOptimum"):
2426         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
2427         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
2428     if selfA._toStore("APosterioriCovariance") or \
2429         selfA._toStore("SimulationQuantiles"):
2430         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
2431     if selfA._toStore("APosterioriCovariance"):
2432         selfA.StoredVariables["APosterioriCovariance"].store( A )
2433     if selfA._toStore("JacobianMatrixAtOptimum"):
2434         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
2435     if selfA._toStore("KalmanGainAtOptimum"):
2436         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
2437         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
2438         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
2439     #
2440     # Calculs et/ou stockages supplémentaires
2441     # ---------------------------------------
2442     if selfA._toStore("Innovation") or \
2443         selfA._toStore("SigmaObs2") or \
2444         selfA._toStore("MahalanobisConsistency") or \
2445         selfA._toStore("OMB"):
2446         d  = Y - HXb
2447     if selfA._toStore("Innovation"):
2448         selfA.StoredVariables["Innovation"].store( d )
2449     if selfA._toStore("BMA"):
2450         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
2451     if selfA._toStore("OMA"):
2452         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
2453     if selfA._toStore("OMB"):
2454         selfA.StoredVariables["OMB"].store( d )
2455     if selfA._toStore("SigmaObs2"):
2456         TraceR = R.trace(Y.size)
2457         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
2458     if selfA._toStore("MahalanobisConsistency"):
2459         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
2460     if selfA._toStore("SimulationQuantiles"):
2461         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
2462     if selfA._toStore("SimulatedObservationAtBackground"):
2463         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
2464     if selfA._toStore("SimulatedObservationAtOptimum"):
2465         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
2466     #
2467     return 0
2468
2469 # ==============================================================================
2470 def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
2471     VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
2472     Hybrid=None,
2473     ):
2474     """
2475     Maximum Likelihood Ensemble Filter
2476     """
2477     if selfA._parameters["EstimationOf"] == "Parameters":
2478         selfA._parameters["StoreInternalVariables"] = True
2479     #
2480     # Opérateurs
2481     H = HO["Direct"].appliedControledFormTo
2482     #
2483     if selfA._parameters["EstimationOf"] == "State":
2484         M = EM["Direct"].appliedControledFormTo
2485     #
2486     if CM is not None and "Tangent" in CM and U is not None:
2487         Cm = CM["Tangent"].asMatrix(Xb)
2488     else:
2489         Cm = None
2490     #
2491     # Durée d'observation et tailles
2492     if hasattr(Y,"stepnumber"):
2493         duration = Y.stepnumber()
2494         __p = numpy.cumprod(Y.shape())[-1]
2495     else:
2496         duration = 2
2497         __p = numpy.array(Y).size
2498     #
2499     # Précalcul des inversions de B et R
2500     if selfA._parameters["StoreInternalVariables"] \
2501         or selfA._toStore("CostFunctionJ") \
2502         or selfA._toStore("CostFunctionJb") \
2503         or selfA._toStore("CostFunctionJo") \
2504         or selfA._toStore("CurrentOptimum") \
2505         or selfA._toStore("APosterioriCovariance"):
2506         BI = B.getI()
2507     RI = R.getI()
2508     #
2509     __n = Xb.size
2510     __m = selfA._parameters["NumberOfMembers"]
2511     #
2512     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2513         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
2514         selfA.StoredVariables["Analysis"].store( Xb )
2515         if selfA._toStore("APosterioriCovariance"):
2516             if hasattr(B,"asfullmatrix"):
2517                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
2518             else:
2519                 selfA.StoredVariables["APosterioriCovariance"].store( B )
2520         selfA._setInternalState("seed", numpy.random.get_state())
2521     elif selfA._parameters["nextStep"]:
2522         Xn = selfA._getInternalState("Xn")
2523     #
2524     previousJMinimum = numpy.finfo(float).max
2525     #
2526     for step in range(duration-1):
2527         numpy.random.set_state(selfA._getInternalState("seed"))
2528         if hasattr(Y,"store"):
2529             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
2530         else:
2531             Ynpu = numpy.ravel( Y ).reshape((__p,1))
2532         #
2533         if U is not None:
2534             if hasattr(U,"store") and len(U)>1:
2535                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2536             elif hasattr(U,"store") and len(U)==1:
2537                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2538             else:
2539                 Un = numpy.ravel( U ).reshape((-1,1))
2540         else:
2541             Un = None
2542         #
2543         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
2544             Xn = CovarianceInflation( Xn,
2545                 selfA._parameters["InflationType"],
2546                 selfA._parameters["InflationFactor"],
2547                 )
2548         #
2549         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
2550             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
2551                 argsAsSerie = True,
2552                 returnSerieAsArrayMatrix = True )
2553             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
2554             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2555                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2556                 Xn_predicted = Xn_predicted + Cm @ Un
2557         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
2558             # --- > Par principe, M = Id, Q = 0
2559             Xn_predicted = EMX = Xn
2560         #
2561         #--------------------------
2562         if VariantM == "MLEF13":
2563             Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
2564             EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
2565             Ua  = numpy.identity(__m)
2566             __j = 0
2567             Deltaw = 1
2568             if not BnotT:
2569                 Ta  = numpy.identity(__m)
2570             vw  = numpy.zeros(__m)
2571             while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
2572                 vx1 = (Xfm + EaX @ vw).reshape((__n,1))
2573                 #
2574                 if BnotT:
2575                     E1 = vx1 + _epsilon * EaX
2576                 else:
2577                     E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
2578                 #
2579                 HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
2580                     argsAsSerie = True,
2581                     returnSerieAsArrayMatrix = True )
2582                 vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
2583                 #
2584                 if BnotT:
2585                     EaY = (HE2 - vy2) / _epsilon
2586                 else:
2587                     EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
2588                 #
2589                 GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
2590                 mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
2591                 Deltaw = - numpy.linalg.solve(mH,GradJ)
2592                 #
2593                 vw = vw + Deltaw
2594                 #
2595                 if not BnotT:
2596                     Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2597                 #
2598                 __j = __j + 1
2599             #
2600             if BnotT:
2601                 Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
2602             #
2603             Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
2604         #--------------------------
2605         else:
2606             raise ValueError("VariantM has to be chosen in the authorized methods list.")
2607         #
2608         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
2609             Xn = CovarianceInflation( Xn,
2610                 selfA._parameters["InflationType"],
2611                 selfA._parameters["InflationFactor"],
2612                 )
2613         #
2614         if Hybrid == "E3DVAR":
2615             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
2616             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
2617         #
2618         Xa = EnsembleMean( Xn )
2619         #--------------------------
2620         selfA._setInternalState("Xn", Xn)
2621         selfA._setInternalState("seed", numpy.random.get_state())
2622         #--------------------------
2623         #
2624         if selfA._parameters["StoreInternalVariables"] \
2625             or selfA._toStore("CostFunctionJ") \
2626             or selfA._toStore("CostFunctionJb") \
2627             or selfA._toStore("CostFunctionJo") \
2628             or selfA._toStore("APosterioriCovariance") \
2629             or selfA._toStore("InnovationAtCurrentAnalysis") \
2630             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
2631             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2632             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
2633             _Innovation = Ynpu - _HXa
2634         #
2635         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2636         # ---> avec analysis
2637         selfA.StoredVariables["Analysis"].store( Xa )
2638         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
2639             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
2640         if selfA._toStore("InnovationAtCurrentAnalysis"):
2641             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
2642         # ---> avec current state
2643         if selfA._parameters["StoreInternalVariables"] \
2644             or selfA._toStore("CurrentState"):
2645             selfA.StoredVariables["CurrentState"].store( Xn )
2646         if selfA._toStore("ForecastState"):
2647             selfA.StoredVariables["ForecastState"].store( EMX )
2648         if selfA._toStore("ForecastCovariance"):
2649             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
2650         if selfA._toStore("BMA"):
2651             selfA.StoredVariables["BMA"].store( EMX - Xa )
2652         if selfA._toStore("InnovationAtCurrentState"):
2653             selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
2654         if selfA._toStore("SimulatedObservationAtCurrentState") \
2655             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2656             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
2657         # ---> autres
2658         if selfA._parameters["StoreInternalVariables"] \
2659             or selfA._toStore("CostFunctionJ") \
2660             or selfA._toStore("CostFunctionJb") \
2661             or selfA._toStore("CostFunctionJo") \
2662             or selfA._toStore("CurrentOptimum") \
2663             or selfA._toStore("APosterioriCovariance"):
2664             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
2665             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
2666             J   = Jb + Jo
2667             selfA.StoredVariables["CostFunctionJb"].store( Jb )
2668             selfA.StoredVariables["CostFunctionJo"].store( Jo )
2669             selfA.StoredVariables["CostFunctionJ" ].store( J )
2670             #
2671             if selfA._toStore("IndexOfOptimum") \
2672                 or selfA._toStore("CurrentOptimum") \
2673                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
2674                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
2675                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
2676                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2677                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2678             if selfA._toStore("IndexOfOptimum"):
2679                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2680             if selfA._toStore("CurrentOptimum"):
2681                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
2682             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2683                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
2684             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2685                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2686             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2687                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2688             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2689                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2690         if selfA._toStore("APosterioriCovariance"):
2691             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
2692         if selfA._parameters["EstimationOf"] == "Parameters" \
2693             and J < previousJMinimum:
2694             previousJMinimum    = J
2695             XaMin               = Xa
2696             if selfA._toStore("APosterioriCovariance"):
2697                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
2698         # ---> Pour les smoothers
2699         if selfA._toStore("CurrentEnsembleState"):
2700             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
2701     #
2702     # Stockage final supplémentaire de l'optimum en estimation de paramètres
2703     # ----------------------------------------------------------------------
2704     if selfA._parameters["EstimationOf"] == "Parameters":
2705         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
2706         selfA.StoredVariables["Analysis"].store( XaMin )
2707         if selfA._toStore("APosterioriCovariance"):
2708             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
2709         if selfA._toStore("BMA"):
2710             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
2711     #
2712     return 0
2713
2714 # ==============================================================================
2715 def mmqr(
2716         func     = None,
2717         x0       = None,
2718         fprime   = None,
2719         bounds   = None,
2720         quantile = 0.5,
2721         maxfun   = 15000,
2722         toler    = 1.e-06,
2723         y        = None,
2724         ):
2725     """
2726     Implémentation informatique de l'algorithme MMQR, basée sur la publication :
2727     David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
2728     Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
2729     """
2730     #
2731     # Recuperation des donnees et informations initiales
2732     # --------------------------------------------------
2733     variables = numpy.ravel( x0 )
2734     mesures   = numpy.ravel( y )
2735     increment = sys.float_info[0]
2736     p         = variables.size
2737     n         = mesures.size
2738     quantile  = float(quantile)
2739     #
2740     # Calcul des parametres du MM
2741     # ---------------------------
2742     tn      = float(toler) / n
2743     e0      = -tn / math.log(tn)
2744     epsilon = (e0-tn)/(1+math.log(e0))
2745     #
2746     # Calculs d'initialisation
2747     # ------------------------
2748     residus  = mesures - numpy.ravel( func( variables ) )
2749     poids    = 1./(epsilon+numpy.abs(residus))
2750     veps     = 1. - 2. * quantile - residus * poids
2751     lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
2752     iteration = 0
2753     #
2754     # Recherche iterative
2755     # -------------------
2756     while (increment > toler) and (iteration < maxfun) :
2757         iteration += 1
2758         #
2759         Derivees  = numpy.array(fprime(variables))
2760         Derivees  = Derivees.reshape(n,p) # ADAO & check shape
2761         DeriveesT = Derivees.transpose()
2762         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
2763         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
2764         step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
2765         #
2766         variables = variables + step
2767         if bounds is not None:
2768             # Attention : boucle infinie à éviter si un intervalle est trop petit
2769             while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
2770                 step      = step/2.
2771                 variables = variables - step
2772         residus   = mesures - numpy.ravel( func(variables) )
2773         surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2774         #
2775         while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
2776             step      = step/2.
2777             variables = variables - step
2778             residus   = mesures - numpy.ravel( func(variables) )
2779             surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
2780         #
2781         increment     = lastsurrogate-surrogate
2782         poids         = 1./(epsilon+numpy.abs(residus))
2783         veps          = 1. - 2. * quantile - residus * poids
2784         lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
2785     #
2786     # Mesure d'écart
2787     # --------------
2788     Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
2789     #
2790     return variables, Ecart, [n,p,iteration,increment,0]
2791
2792 # ==============================================================================
2793 def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
2794     """
2795     3DVAR multi-pas et multi-méthodes
2796     """
2797     #
2798     # Initialisation
2799     # --------------
2800     if selfA._parameters["EstimationOf"] == "State":
2801         M = EM["Direct"].appliedControledFormTo
2802         if CM is not None and "Tangent" in CM and U is not None:
2803             Cm = CM["Tangent"].asMatrix(Xb)
2804         else:
2805             Cm = None
2806         #
2807         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
2808             Xn = numpy.ravel(Xb).reshape((-1,1))
2809             selfA.StoredVariables["Analysis"].store( Xn )
2810             if selfA._toStore("APosterioriCovariance"):
2811                 if hasattr(B,"asfullmatrix"):
2812                     selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
2813                 else:
2814                     selfA.StoredVariables["APosterioriCovariance"].store( B )
2815             if selfA._toStore("ForecastState"):
2816                 selfA.StoredVariables["ForecastState"].store( Xn )
2817         elif selfA._parameters["nextStep"]:
2818             Xn = selfA._getInternalState("Xn")
2819     else:
2820         Xn = numpy.ravel(Xb).reshape((-1,1))
2821     #
2822     if hasattr(Y,"stepnumber"):
2823         duration = Y.stepnumber()
2824     else:
2825         duration = 2
2826     #
2827     # Multi-pas
2828     for step in range(duration-1):
2829         if hasattr(Y,"store"):
2830             Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
2831         else:
2832             Ynpu = numpy.ravel( Y ).reshape((-1,1))
2833         #
2834         if U is not None:
2835             if hasattr(U,"store") and len(U)>1:
2836                 Un = numpy.ravel( U[step] ).reshape((-1,1))
2837             elif hasattr(U,"store") and len(U)==1:
2838                 Un = numpy.ravel( U[0] ).reshape((-1,1))
2839             else:
2840                 Un = numpy.ravel( U ).reshape((-1,1))
2841         else:
2842             Un = None
2843         #
2844         if selfA._parameters["EstimationOf"] == "State": # Forecast
2845             Xn_predicted = M( (Xn, Un) )
2846             if selfA._toStore("ForecastState"):
2847                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
2848             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
2849                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
2850                 Xn_predicted = Xn_predicted + Cm @ Un
2851         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
2852             # --- > Par principe, M = Id, Q = 0
2853             Xn_predicted = Xn
2854         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
2855         #
2856         oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
2857         #
2858         Xn = selfA.StoredVariables["Analysis"][-1]
2859         #--------------------------
2860         selfA._setInternalState("Xn", Xn)
2861     #
2862     return 0
2863
2864 # ==============================================================================
2865 def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
2866     """
2867     3DVAR PSAS
2868     """
2869     #
2870     # Initialisations
2871     # ---------------
2872     Hm = HO["Direct"].appliedTo
2873     #
2874     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
2875         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
2876     else:
2877         HXb = Hm( Xb )
2878     HXb = numpy.ravel( HXb ).reshape((-1,1))
2879     if Y.size != HXb.size:
2880         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
2881     if max(Y.shape) != max(HXb.shape):
2882         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
2883     #
2884     if selfA._toStore("JacobianMatrixAtBackground"):
2885         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
2886         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
2887         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
2888     #
2889     Ht = HO["Tangent"].asMatrix(Xb)
2890     BHT = B * Ht.T
2891     HBHTpR = R + Ht * BHT
2892     Innovation = Y - HXb
2893     #
2894     Xini = numpy.zeros(Xb.shape)
2895     #
2896     # Définition de la fonction-coût
2897     # ------------------------------
2898     def CostFunction(w):
2899         _W = w.reshape((-1,1))
2900         if selfA._parameters["StoreInternalVariables"] or \
2901             selfA._toStore("CurrentState") or \
2902             selfA._toStore("CurrentOptimum"):
2903             selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
2904         if selfA._toStore("SimulatedObservationAtCurrentState") or \
2905             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2906             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
2907         if selfA._toStore("InnovationAtCurrentState"):
2908             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
2909         #
2910         Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
2911         Jo  = float( - _W.T @ Innovation )
2912         J   = Jb + Jo
2913         #
2914         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
2915         selfA.StoredVariables["CostFunctionJb"].store( Jb )
2916         selfA.StoredVariables["CostFunctionJo"].store( Jo )
2917         selfA.StoredVariables["CostFunctionJ" ].store( J )
2918         if selfA._toStore("IndexOfOptimum") or \
2919             selfA._toStore("CurrentOptimum") or \
2920             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
2921             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
2922             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
2923             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2924             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
2925         if selfA._toStore("IndexOfOptimum"):
2926             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
2927         if selfA._toStore("CurrentOptimum"):
2928             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
2929         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
2930             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
2931         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
2932             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
2933         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
2934             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
2935         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
2936             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
2937         return J
2938     #
2939     def GradientOfCostFunction(w):
2940         _W = w.reshape((-1,1))
2941         GradJb  = HBHTpR @ _W
2942         GradJo  = - Innovation
2943         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
2944         return GradJ
2945     #
2946     # Minimisation de la fonctionnelle
2947     # --------------------------------
2948     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
2949     #
2950     if selfA._parameters["Minimizer"] == "LBFGSB":
2951         if "0.19" <= scipy.version.version <= "1.1.0":
2952             import lbfgsbhlt as optimiseur
2953         else:
2954             import scipy.optimize as optimiseur
2955         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
2956             func        = CostFunction,
2957             x0          = Xini,
2958             fprime      = GradientOfCostFunction,
2959             args        = (),
2960             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2961             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
2962             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
2963             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2964             iprint      = selfA._parameters["optiprint"],
2965             )
2966         nfeval = Informations['funcalls']
2967         rc     = Informations['warnflag']
2968     elif selfA._parameters["Minimizer"] == "TNC":
2969         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
2970             func        = CostFunction,
2971             x0          = Xini,
2972             fprime      = GradientOfCostFunction,
2973             args        = (),
2974             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
2975             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
2976             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
2977             ftol        = selfA._parameters["CostDecrementTolerance"],
2978             messages    = selfA._parameters["optmessages"],
2979             )
2980     elif selfA._parameters["Minimizer"] == "CG":
2981         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
2982             f           = CostFunction,
2983             x0          = Xini,
2984             fprime      = GradientOfCostFunction,
2985             args        = (),
2986             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2987             gtol        = selfA._parameters["GradientNormTolerance"],
2988             disp        = selfA._parameters["optdisp"],
2989             full_output = True,
2990             )
2991     elif selfA._parameters["Minimizer"] == "NCG":
2992         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
2993             f           = CostFunction,
2994             x0          = Xini,
2995             fprime      = GradientOfCostFunction,
2996             args        = (),
2997             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
2998             avextol     = selfA._parameters["CostDecrementTolerance"],
2999             disp        = selfA._parameters["optdisp"],
3000             full_output = True,
3001             )
3002     elif selfA._parameters["Minimizer"] == "BFGS":
3003         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3004             f           = CostFunction,
3005             x0          = Xini,
3006             fprime      = GradientOfCostFunction,
3007             args        = (),
3008             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3009             gtol        = selfA._parameters["GradientNormTolerance"],
3010             disp        = selfA._parameters["optdisp"],
3011             full_output = True,
3012             )
3013     else:
3014         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3015     #
3016     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3017     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3018     #
3019     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3020     # ----------------------------------------------------------------
3021     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3022         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3023     else:
3024         Minimum = Xb + BHT @ Minimum.reshape((-1,1))
3025     #
3026     Xa = Minimum
3027     #--------------------------
3028     #
3029     selfA.StoredVariables["Analysis"].store( Xa )
3030     #
3031     if selfA._toStore("OMA") or \
3032         selfA._toStore("SigmaObs2") or \
3033         selfA._toStore("SimulationQuantiles") or \
3034         selfA._toStore("SimulatedObservationAtOptimum"):
3035         if selfA._toStore("SimulatedObservationAtCurrentState"):
3036             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3037         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3038             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3039         else:
3040             HXa = Hm( Xa )
3041     #
3042     if selfA._toStore("APosterioriCovariance") or \
3043         selfA._toStore("SimulationQuantiles") or \
3044         selfA._toStore("JacobianMatrixAtOptimum") or \
3045         selfA._toStore("KalmanGainAtOptimum"):
3046         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3047         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3048     if selfA._toStore("APosterioriCovariance") or \
3049         selfA._toStore("SimulationQuantiles") or \
3050         selfA._toStore("KalmanGainAtOptimum"):
3051         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3052         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3053     if selfA._toStore("APosterioriCovariance") or \
3054         selfA._toStore("SimulationQuantiles"):
3055         BI = B.getI()
3056         RI = R.getI()
3057         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3058     if selfA._toStore("APosterioriCovariance"):
3059         selfA.StoredVariables["APosterioriCovariance"].store( A )
3060     if selfA._toStore("JacobianMatrixAtOptimum"):
3061         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3062     if selfA._toStore("KalmanGainAtOptimum"):
3063         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3064         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3065         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3066     #
3067     # Calculs et/ou stockages supplémentaires
3068     # ---------------------------------------
3069     if selfA._toStore("Innovation") or \
3070         selfA._toStore("SigmaObs2") or \
3071         selfA._toStore("MahalanobisConsistency") or \
3072         selfA._toStore("OMB"):
3073         d  = Y - HXb
3074     if selfA._toStore("Innovation"):
3075         selfA.StoredVariables["Innovation"].store( d )
3076     if selfA._toStore("BMA"):
3077         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3078     if selfA._toStore("OMA"):
3079         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3080     if selfA._toStore("OMB"):
3081         selfA.StoredVariables["OMB"].store( d )
3082     if selfA._toStore("SigmaObs2"):
3083         TraceR = R.trace(Y.size)
3084         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3085     if selfA._toStore("MahalanobisConsistency"):
3086         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3087     if selfA._toStore("SimulationQuantiles"):
3088         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3089     if selfA._toStore("SimulatedObservationAtBackground"):
3090         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3091     if selfA._toStore("SimulatedObservationAtOptimum"):
3092         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3093     #
3094     return 0
3095
3096 # ==============================================================================
3097 def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
3098     VariantM="KalmanFilterFormula16",
3099     Hybrid=None,
3100     ):
3101     """
3102     Stochastic EnKF
3103     """
3104     if selfA._parameters["EstimationOf"] == "Parameters":
3105         selfA._parameters["StoreInternalVariables"] = True
3106     #
3107     # Opérateurs
3108     H = HO["Direct"].appliedControledFormTo
3109     #
3110     if selfA._parameters["EstimationOf"] == "State":
3111         M = EM["Direct"].appliedControledFormTo
3112     #
3113     if CM is not None and "Tangent" in CM and U is not None:
3114         Cm = CM["Tangent"].asMatrix(Xb)
3115     else:
3116         Cm = None
3117     #
3118     # Durée d'observation et tailles
3119     if hasattr(Y,"stepnumber"):
3120         duration = Y.stepnumber()
3121         __p = numpy.cumprod(Y.shape())[-1]
3122     else:
3123         duration = 2
3124         __p = numpy.array(Y).size
3125     #
3126     # Précalcul des inversions de B et R
3127     if selfA._parameters["StoreInternalVariables"] \
3128         or selfA._toStore("CostFunctionJ") \
3129         or selfA._toStore("CostFunctionJb") \
3130         or selfA._toStore("CostFunctionJo") \
3131         or selfA._toStore("CurrentOptimum") \
3132         or selfA._toStore("APosterioriCovariance"):
3133         BI = B.getI()
3134         RI = R.getI()
3135     #
3136     __n = Xb.size
3137     __m = selfA._parameters["NumberOfMembers"]
3138     #
3139     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
3140     else:                         Rn = R
3141     #
3142     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3143         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
3144         else:                         Pn = B
3145         Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
3146         selfA.StoredVariables["Analysis"].store( Xb )
3147         if selfA._toStore("APosterioriCovariance"):
3148             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3149         selfA._setInternalState("seed", numpy.random.get_state())
3150     elif selfA._parameters["nextStep"]:
3151         Xn = selfA._getInternalState("Xn")
3152     #
3153     previousJMinimum = numpy.finfo(float).max
3154     #
3155     for step in range(duration-1):
3156         numpy.random.set_state(selfA._getInternalState("seed"))
3157         if hasattr(Y,"store"):
3158             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3159         else:
3160             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3161         #
3162         if U is not None:
3163             if hasattr(U,"store") and len(U)>1:
3164                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3165             elif hasattr(U,"store") and len(U)==1:
3166                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3167             else:
3168                 Un = numpy.ravel( U ).reshape((-1,1))
3169         else:
3170             Un = None
3171         #
3172         if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
3173             Xn = CovarianceInflation( Xn,
3174                 selfA._parameters["InflationType"],
3175                 selfA._parameters["InflationFactor"],
3176                 )
3177         #
3178         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3179             EMX = M( [(Xn[:,i], Un) for i in range(__m)],
3180                 argsAsSerie = True,
3181                 returnSerieAsArrayMatrix = True )
3182             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
3183             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3184                 argsAsSerie = True,
3185                 returnSerieAsArrayMatrix = True )
3186             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3187                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3188                 Xn_predicted = Xn_predicted + Cm @ Un
3189         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3190             # --- > Par principe, M = Id, Q = 0
3191             Xn_predicted = EMX = Xn
3192             HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
3193                 argsAsSerie = True,
3194                 returnSerieAsArrayMatrix = True )
3195         #
3196         # Mean of forecast and observation of forecast
3197         Xfm  = EnsembleMean( Xn_predicted )
3198         Hfm  = EnsembleMean( HX_predicted )
3199         #
3200         #--------------------------
3201         if VariantM == "KalmanFilterFormula05":
3202             PfHT, HPfHT = 0., 0.
3203             for i in range(__m):
3204                 Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
3205                 Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
3206                 PfHT  += Exfi * Eyfi.T
3207                 HPfHT += Eyfi * Eyfi.T
3208             PfHT  = (1./(__m-1)) * PfHT
3209             HPfHT = (1./(__m-1)) * HPfHT
3210             Kn     = PfHT * ( R + HPfHT ).I
3211             del PfHT, HPfHT
3212             #
3213             for i in range(__m):
3214                 ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
3215                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
3216         #--------------------------
3217         elif VariantM == "KalmanFilterFormula16":
3218             EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
3219             EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
3220             #
3221             EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
3222             EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
3223             #
3224             Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
3225             #
3226             for i in range(__m):
3227                 Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
3228         #--------------------------
3229         else:
3230             raise ValueError("VariantM has to be chosen in the authorized methods list.")
3231         #
3232         if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
3233             Xn = CovarianceInflation( Xn,
3234                 selfA._parameters["InflationType"],
3235                 selfA._parameters["InflationFactor"],
3236                 )
3237         #
3238         if Hybrid == "E3DVAR":
3239             betaf = selfA._parameters["HybridCovarianceEquilibrium"]
3240             Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
3241         #
3242         Xa = EnsembleMean( Xn )
3243         #--------------------------
3244         selfA._setInternalState("Xn", Xn)
3245         selfA._setInternalState("seed", numpy.random.get_state())
3246         #--------------------------
3247         #
3248         if selfA._parameters["StoreInternalVariables"] \
3249             or selfA._toStore("CostFunctionJ") \
3250             or selfA._toStore("CostFunctionJb") \
3251             or selfA._toStore("CostFunctionJo") \
3252             or selfA._toStore("APosterioriCovariance") \
3253             or selfA._toStore("InnovationAtCurrentAnalysis") \
3254             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
3255             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3256             _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
3257             _Innovation = Ynpu - _HXa
3258         #
3259         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3260         # ---> avec analysis
3261         selfA.StoredVariables["Analysis"].store( Xa )
3262         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3263             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
3264         if selfA._toStore("InnovationAtCurrentAnalysis"):
3265             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3266         # ---> avec current state
3267         if selfA._parameters["StoreInternalVariables"] \
3268             or selfA._toStore("CurrentState"):
3269             selfA.StoredVariables["CurrentState"].store( Xn )
3270         if selfA._toStore("ForecastState"):
3271             selfA.StoredVariables["ForecastState"].store( EMX )
3272         if selfA._toStore("ForecastCovariance"):
3273             selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
3274         if selfA._toStore("BMA"):
3275             selfA.StoredVariables["BMA"].store( EMX - Xa )
3276         if selfA._toStore("InnovationAtCurrentState"):
3277             selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
3278         if selfA._toStore("SimulatedObservationAtCurrentState") \
3279             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3280             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3281         # ---> autres
3282         if selfA._parameters["StoreInternalVariables"] \
3283             or selfA._toStore("CostFunctionJ") \
3284             or selfA._toStore("CostFunctionJb") \
3285             or selfA._toStore("CostFunctionJo") \
3286             or selfA._toStore("CurrentOptimum") \
3287             or selfA._toStore("APosterioriCovariance"):
3288             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3289             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3290             J   = Jb + Jo
3291             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3292             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3293             selfA.StoredVariables["CostFunctionJ" ].store( J )
3294             #
3295             if selfA._toStore("IndexOfOptimum") \
3296                 or selfA._toStore("CurrentOptimum") \
3297                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3298                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3299                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3300                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3301                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3302             if selfA._toStore("IndexOfOptimum"):
3303                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3304             if selfA._toStore("CurrentOptimum"):
3305                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3306             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3307                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3308             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3309                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3310             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3311                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3312             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3313                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3314         if selfA._toStore("APosterioriCovariance"):
3315             selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
3316         if selfA._parameters["EstimationOf"] == "Parameters" \
3317             and J < previousJMinimum:
3318             previousJMinimum    = J
3319             XaMin               = Xa
3320             if selfA._toStore("APosterioriCovariance"):
3321                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3322         # ---> Pour les smoothers
3323         if selfA._toStore("CurrentEnsembleState"):
3324             selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
3325     #
3326     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3327     # ----------------------------------------------------------------------
3328     if selfA._parameters["EstimationOf"] == "Parameters":
3329         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3330         selfA.StoredVariables["Analysis"].store( XaMin )
3331         if selfA._toStore("APosterioriCovariance"):
3332             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3333         if selfA._toStore("BMA"):
3334             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3335     #
3336     return 0
3337
3338 # ==============================================================================
3339 def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3340     """
3341     3DVAR
3342     """
3343     #
3344     # Initialisations
3345     # ---------------
3346     Hm = HO["Direct"].appliedTo
3347     Ha = HO["Adjoint"].appliedInXTo
3348     #
3349     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
3350         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
3351     else:
3352         HXb = Hm( Xb )
3353     HXb = HXb.reshape((-1,1))
3354     if Y.size != HXb.size:
3355         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
3356     if max(Y.shape) != max(HXb.shape):
3357         raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
3358     #
3359     if selfA._toStore("JacobianMatrixAtBackground"):
3360         HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
3361         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
3362         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
3363     #
3364     BI = B.getI()
3365     RI = R.getI()
3366     #
3367     Xini = selfA._parameters["InitializationPoint"]
3368     #
3369     # Définition de la fonction-coût
3370     # ------------------------------
3371     def CostFunction(x):
3372         _X  = numpy.ravel( x ).reshape((-1,1))
3373         if selfA._parameters["StoreInternalVariables"] or \
3374             selfA._toStore("CurrentState") or \
3375             selfA._toStore("CurrentOptimum"):
3376             selfA.StoredVariables["CurrentState"].store( _X )
3377         _HX = Hm( _X ).reshape((-1,1))
3378         _Innovation = Y - _HX
3379         if selfA._toStore("SimulatedObservationAtCurrentState") or \
3380             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3381             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
3382         if selfA._toStore("InnovationAtCurrentState"):
3383             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3384         #
3385         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3386         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3387         J   = Jb + Jo
3388         #
3389         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3390         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3391         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3392         selfA.StoredVariables["CostFunctionJ" ].store( J )
3393         if selfA._toStore("IndexOfOptimum") or \
3394             selfA._toStore("CurrentOptimum") or \
3395             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3396             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3397             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
3398             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3399             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3400         if selfA._toStore("IndexOfOptimum"):
3401             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3402         if selfA._toStore("CurrentOptimum"):
3403             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3404         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3405             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
3406         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3407             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3408         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3409             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3410         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3411             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3412         return J
3413     #
3414     def GradientOfCostFunction(x):
3415         _X      = x.reshape((-1,1))
3416         _HX     = Hm( _X ).reshape((-1,1))
3417         GradJb  = BI * (_X - Xb)
3418         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
3419         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
3420         return GradJ
3421     #
3422     # Minimisation de la fonctionnelle
3423     # --------------------------------
3424     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3425     #
3426     if selfA._parameters["Minimizer"] == "LBFGSB":
3427         if "0.19" <= scipy.version.version <= "1.1.0":
3428             import lbfgsbhlt as optimiseur
3429         else:
3430             import scipy.optimize as optimiseur
3431         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3432             func        = CostFunction,
3433             x0          = Xini,
3434             fprime      = GradientOfCostFunction,
3435             args        = (),
3436             bounds      = selfA._parameters["Bounds"],
3437             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3438             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3439             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3440             iprint      = selfA._parameters["optiprint"],
3441             )
3442         nfeval = Informations['funcalls']
3443         rc     = Informations['warnflag']
3444     elif selfA._parameters["Minimizer"] == "TNC":
3445         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3446             func        = CostFunction,
3447             x0          = Xini,
3448             fprime      = GradientOfCostFunction,
3449             args        = (),
3450             bounds      = selfA._parameters["Bounds"],
3451             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3452             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3453             ftol        = selfA._parameters["CostDecrementTolerance"],
3454             messages    = selfA._parameters["optmessages"],
3455             )
3456     elif selfA._parameters["Minimizer"] == "CG":
3457         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3458             f           = CostFunction,
3459             x0          = Xini,
3460             fprime      = GradientOfCostFunction,
3461             args        = (),
3462             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3463             gtol        = selfA._parameters["GradientNormTolerance"],
3464             disp        = selfA._parameters["optdisp"],
3465             full_output = True,
3466             )
3467     elif selfA._parameters["Minimizer"] == "NCG":
3468         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3469             f           = CostFunction,
3470             x0          = Xini,
3471             fprime      = GradientOfCostFunction,
3472             args        = (),
3473             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3474             avextol     = selfA._parameters["CostDecrementTolerance"],
3475             disp        = selfA._parameters["optdisp"],
3476             full_output = True,
3477             )
3478     elif selfA._parameters["Minimizer"] == "BFGS":
3479         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3480             f           = CostFunction,
3481             x0          = Xini,
3482             fprime      = GradientOfCostFunction,
3483             args        = (),
3484             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3485             gtol        = selfA._parameters["GradientNormTolerance"],
3486             disp        = selfA._parameters["optdisp"],
3487             full_output = True,
3488             )
3489     else:
3490         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3491     #
3492     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3493     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3494     #
3495     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3496     # ----------------------------------------------------------------
3497     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3498         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3499     #
3500     Xa = Minimum
3501     #--------------------------
3502     #
3503     selfA.StoredVariables["Analysis"].store( Xa )
3504     #
3505     if selfA._toStore("OMA") or \
3506         selfA._toStore("SigmaObs2") or \
3507         selfA._toStore("SimulationQuantiles") or \
3508         selfA._toStore("SimulatedObservationAtOptimum"):
3509         if selfA._toStore("SimulatedObservationAtCurrentState"):
3510             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
3511         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3512             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
3513         else:
3514             HXa = Hm( Xa )
3515     #
3516     if selfA._toStore("APosterioriCovariance") or \
3517         selfA._toStore("SimulationQuantiles") or \
3518         selfA._toStore("JacobianMatrixAtOptimum") or \
3519         selfA._toStore("KalmanGainAtOptimum"):
3520         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
3521         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
3522     if selfA._toStore("APosterioriCovariance") or \
3523         selfA._toStore("SimulationQuantiles") or \
3524         selfA._toStore("KalmanGainAtOptimum"):
3525         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
3526         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
3527     if selfA._toStore("APosterioriCovariance") or \
3528         selfA._toStore("SimulationQuantiles"):
3529         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
3530     if selfA._toStore("APosterioriCovariance"):
3531         selfA.StoredVariables["APosterioriCovariance"].store( A )
3532     if selfA._toStore("JacobianMatrixAtOptimum"):
3533         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
3534     if selfA._toStore("KalmanGainAtOptimum"):
3535         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
3536         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
3537         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
3538     #
3539     # Calculs et/ou stockages supplémentaires
3540     # ---------------------------------------
3541     if selfA._toStore("Innovation") or \
3542         selfA._toStore("SigmaObs2") or \
3543         selfA._toStore("MahalanobisConsistency") or \
3544         selfA._toStore("OMB"):
3545         d  = Y - HXb
3546     if selfA._toStore("Innovation"):
3547         selfA.StoredVariables["Innovation"].store( d )
3548     if selfA._toStore("BMA"):
3549         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3550     if selfA._toStore("OMA"):
3551         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
3552     if selfA._toStore("OMB"):
3553         selfA.StoredVariables["OMB"].store( d )
3554     if selfA._toStore("SigmaObs2"):
3555         TraceR = R.trace(Y.size)
3556         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
3557     if selfA._toStore("MahalanobisConsistency"):
3558         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
3559     if selfA._toStore("SimulationQuantiles"):
3560         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
3561     if selfA._toStore("SimulatedObservationAtBackground"):
3562         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
3563     if selfA._toStore("SimulatedObservationAtOptimum"):
3564         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
3565     #
3566     return 0
3567
3568 # ==============================================================================
3569 def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3570     """
3571     4DVAR
3572     """
3573     #
3574     # Initialisations
3575     # ---------------
3576     #
3577     # Opérateurs
3578     Hm = HO["Direct"].appliedControledFormTo
3579     Mm = EM["Direct"].appliedControledFormTo
3580     #
3581     if CM is not None and "Tangent" in CM and U is not None:
3582         Cm = CM["Tangent"].asMatrix(Xb)
3583     else:
3584         Cm = None
3585     #
3586     def Un(_step):
3587         if U is not None:
3588             if hasattr(U,"store") and 1<=_step<len(U) :
3589                 _Un = numpy.ravel( U[_step] ).reshape((-1,1))
3590             elif hasattr(U,"store") and len(U)==1:
3591                 _Un = numpy.ravel( U[0] ).reshape((-1,1))
3592             else:
3593                 _Un = numpy.ravel( U ).reshape((-1,1))
3594         else:
3595             _Un = None
3596         return _Un
3597     def CmUn(_xn,_un):
3598         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
3599             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
3600             _CmUn = (_Cm @ _un).reshape((-1,1))
3601         else:
3602             _CmUn = 0.
3603         return _CmUn
3604     #
3605     # Remarque : les observations sont exploitées à partir du pas de temps
3606     # numéro 1, et sont utilisées dans Yo comme rangées selon ces indices.
3607     # Donc le pas 0 n'est pas utilisé puisque la première étape commence
3608     # avec l'observation du pas 1.
3609     #
3610     # Nombre de pas identique au nombre de pas d'observations
3611     if hasattr(Y,"stepnumber"):
3612         duration = Y.stepnumber()
3613     else:
3614         duration = 2
3615     #
3616     # Précalcul des inversions de B et R
3617     BI = B.getI()
3618     RI = R.getI()
3619     #
3620     # Point de démarrage de l'optimisation
3621     Xini = selfA._parameters["InitializationPoint"]
3622     #
3623     # Définition de la fonction-coût
3624     # ------------------------------
3625     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
3626     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
3627     def CostFunction(x):
3628         _X  = numpy.ravel( x ).reshape((-1,1))
3629         if selfA._parameters["StoreInternalVariables"] or \
3630             selfA._toStore("CurrentState") or \
3631             selfA._toStore("CurrentOptimum"):
3632             selfA.StoredVariables["CurrentState"].store( _X )
3633         Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
3634         selfA.DirectCalculation = [None,]
3635         selfA.DirectInnovation  = [None,]
3636         Jo  = 0.
3637         _Xn = _X
3638         for step in range(0,duration-1):
3639             if hasattr(Y,"store"):
3640                 _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
3641             else:
3642                 _Ynpu = numpy.ravel( Y ).reshape((-1,1))
3643             _Un = Un(step)
3644             #
3645             # Etape d'évolution
3646             if selfA._parameters["EstimationOf"] == "State":
3647                 _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
3648             elif selfA._parameters["EstimationOf"] == "Parameters":
3649                 pass
3650             #
3651             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
3652                 _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
3653             #
3654             # Etape de différence aux observations
3655             if selfA._parameters["EstimationOf"] == "State":
3656                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
3657             elif selfA._parameters["EstimationOf"] == "Parameters":
3658                 _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
3659             #
3660             # Stockage de l'état
3661             selfA.DirectCalculation.append( _Xn )
3662             selfA.DirectInnovation.append( _YmHMX )
3663             #
3664             # Ajout dans la fonctionnelle d'observation
3665             Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
3666         J = Jb + Jo
3667         #
3668         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
3669         selfA.StoredVariables["CostFunctionJb"].store( Jb )
3670         selfA.StoredVariables["CostFunctionJo"].store( Jo )
3671         selfA.StoredVariables["CostFunctionJ" ].store( J )
3672         if selfA._toStore("IndexOfOptimum") or \
3673             selfA._toStore("CurrentOptimum") or \
3674             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
3675             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
3676             selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3677             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3678         if selfA._toStore("IndexOfOptimum"):
3679             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3680         if selfA._toStore("CurrentOptimum"):
3681             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
3682         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3683             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3684         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3685             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3686         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3687             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3688         return J
3689     #
3690     def GradientOfCostFunction(x):
3691         _X      = numpy.ravel( x ).reshape((-1,1))
3692         GradJb  = BI * (_X - Xb)
3693         GradJo  = 0.
3694         for step in range(duration-1,0,-1):
3695             # Étape de récupération du dernier stockage de l'évolution
3696             _Xn = selfA.DirectCalculation.pop()
3697             # Étape de récupération du dernier stockage de l'innovation
3698             _YmHMX = selfA.DirectInnovation.pop()
3699             # Calcul des adjoints
3700             Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3701             Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
3702             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
3703             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
3704             # Calcul du gradient par état adjoint
3705             GradJo = GradJo + Ha * (RI * _YmHMX) # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
3706             GradJo = Ma * GradJo                 # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
3707         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
3708         return GradJ
3709     #
3710     # Minimisation de la fonctionnelle
3711     # --------------------------------
3712     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
3713     #
3714     if selfA._parameters["Minimizer"] == "LBFGSB":
3715         if "0.19" <= scipy.version.version <= "1.1.0":
3716             import lbfgsbhlt as optimiseur
3717         else:
3718             import scipy.optimize as optimiseur
3719         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
3720             func        = CostFunction,
3721             x0          = Xini,
3722             fprime      = GradientOfCostFunction,
3723             args        = (),
3724             bounds      = selfA._parameters["Bounds"],
3725             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
3726             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
3727             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3728             iprint      = selfA._parameters["optiprint"],
3729             )
3730         nfeval = Informations['funcalls']
3731         rc     = Informations['warnflag']
3732     elif selfA._parameters["Minimizer"] == "TNC":
3733         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
3734             func        = CostFunction,
3735             x0          = Xini,
3736             fprime      = GradientOfCostFunction,
3737             args        = (),
3738             bounds      = selfA._parameters["Bounds"],
3739             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
3740             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
3741             ftol        = selfA._parameters["CostDecrementTolerance"],
3742             messages    = selfA._parameters["optmessages"],
3743             )
3744     elif selfA._parameters["Minimizer"] == "CG":
3745         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
3746             f           = CostFunction,
3747             x0          = Xini,
3748             fprime      = GradientOfCostFunction,
3749             args        = (),
3750             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3751             gtol        = selfA._parameters["GradientNormTolerance"],
3752             disp        = selfA._parameters["optdisp"],
3753             full_output = True,
3754             )
3755     elif selfA._parameters["Minimizer"] == "NCG":
3756         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
3757             f           = CostFunction,
3758             x0          = Xini,
3759             fprime      = GradientOfCostFunction,
3760             args        = (),
3761             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3762             avextol     = selfA._parameters["CostDecrementTolerance"],
3763             disp        = selfA._parameters["optdisp"],
3764             full_output = True,
3765             )
3766     elif selfA._parameters["Minimizer"] == "BFGS":
3767         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
3768             f           = CostFunction,
3769             x0          = Xini,
3770             fprime      = GradientOfCostFunction,
3771             args        = (),
3772             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
3773             gtol        = selfA._parameters["GradientNormTolerance"],
3774             disp        = selfA._parameters["optdisp"],
3775             full_output = True,
3776             )
3777     else:
3778         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
3779     #
3780     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3781     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
3782     #
3783     # Correction pour pallier a un bug de TNC sur le retour du Minimum
3784     # ----------------------------------------------------------------
3785     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
3786         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
3787     #
3788     # Obtention de l'analyse
3789     # ----------------------
3790     Xa = Minimum
3791     #
3792     selfA.StoredVariables["Analysis"].store( Xa )
3793     #
3794     # Calculs et/ou stockages supplémentaires
3795     # ---------------------------------------
3796     if selfA._toStore("BMA"):
3797         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
3798     #
3799     return 0
3800
3801 # ==============================================================================
3802 def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3803     """
3804     Standard Kalman Filter
3805     """
3806     if selfA._parameters["EstimationOf"] == "Parameters":
3807         selfA._parameters["StoreInternalVariables"] = True
3808     #
3809     # Opérateurs
3810     # ----------
3811     Ht = HO["Tangent"].asMatrix(Xb)
3812     Ha = HO["Adjoint"].asMatrix(Xb)
3813     #
3814     if selfA._parameters["EstimationOf"] == "State":
3815         Mt = EM["Tangent"].asMatrix(Xb)
3816         Ma = EM["Adjoint"].asMatrix(Xb)
3817     #
3818     if CM is not None and "Tangent" in CM and U is not None:
3819         Cm = CM["Tangent"].asMatrix(Xb)
3820     else:
3821         Cm = None
3822     #
3823     # Durée d'observation et tailles
3824     if hasattr(Y,"stepnumber"):
3825         duration = Y.stepnumber()
3826         __p = numpy.cumprod(Y.shape())[-1]
3827     else:
3828         duration = 2
3829         __p = numpy.array(Y).size
3830     #
3831     # Précalcul des inversions de B et R
3832     if selfA._parameters["StoreInternalVariables"] \
3833         or selfA._toStore("CostFunctionJ") \
3834         or selfA._toStore("CostFunctionJb") \
3835         or selfA._toStore("CostFunctionJo") \
3836         or selfA._toStore("CurrentOptimum") \
3837         or selfA._toStore("APosterioriCovariance"):
3838         BI = B.getI()
3839         RI = R.getI()
3840     #
3841     __n = Xb.size
3842     #
3843     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
3844         Xn = Xb
3845         Pn = B
3846         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3847         selfA.StoredVariables["Analysis"].store( Xb )
3848         if selfA._toStore("APosterioriCovariance"):
3849             if hasattr(B,"asfullmatrix"):
3850                 selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
3851             else:
3852                 selfA.StoredVariables["APosterioriCovariance"].store( B )
3853         selfA._setInternalState("seed", numpy.random.get_state())
3854     elif selfA._parameters["nextStep"]:
3855         Xn = selfA._getInternalState("Xn")
3856         Pn = selfA._getInternalState("Pn")
3857     #
3858     if selfA._parameters["EstimationOf"] == "Parameters":
3859         XaMin            = Xn
3860         previousJMinimum = numpy.finfo(float).max
3861     #
3862     for step in range(duration-1):
3863         if hasattr(Y,"store"):
3864             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
3865         else:
3866             Ynpu = numpy.ravel( Y ).reshape((__p,1))
3867         #
3868         if U is not None:
3869             if hasattr(U,"store") and len(U)>1:
3870                 Un = numpy.ravel( U[step] ).reshape((-1,1))
3871             elif hasattr(U,"store") and len(U)==1:
3872                 Un = numpy.ravel( U[0] ).reshape((-1,1))
3873             else:
3874                 Un = numpy.ravel( U ).reshape((-1,1))
3875         else:
3876             Un = None
3877         #
3878         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
3879             Xn_predicted = Mt @ Xn
3880             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
3881                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
3882                 Xn_predicted = Xn_predicted + Cm @ Un
3883             Pn_predicted = Q + Mt * (Pn * Ma)
3884         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
3885             # --- > Par principe, M = Id, Q = 0
3886             Xn_predicted = Xn
3887             Pn_predicted = Pn
3888         #
3889         if selfA._parameters["EstimationOf"] == "State":
3890             HX_predicted = Ht @ Xn_predicted
3891             _Innovation  = Ynpu - HX_predicted
3892         elif selfA._parameters["EstimationOf"] == "Parameters":
3893             HX_predicted = Ht @ Xn_predicted
3894             _Innovation  = Ynpu - HX_predicted
3895             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
3896                 _Innovation = _Innovation - Cm @ Un
3897         #
3898         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
3899         Xn = Xn_predicted + Kn * _Innovation
3900         Pn = Pn_predicted - Kn * Ht * Pn_predicted
3901         #
3902         Xa = Xn # Pointeurs
3903         #--------------------------
3904         selfA._setInternalState("Xn", Xn)
3905         selfA._setInternalState("Pn", Pn)
3906         #--------------------------
3907         #
3908         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3909         # ---> avec analysis
3910         selfA.StoredVariables["Analysis"].store( Xa )
3911         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
3912             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
3913         if selfA._toStore("InnovationAtCurrentAnalysis"):
3914             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
3915         # ---> avec current state
3916         if selfA._parameters["StoreInternalVariables"] \
3917             or selfA._toStore("CurrentState"):
3918             selfA.StoredVariables["CurrentState"].store( Xn )
3919         if selfA._toStore("ForecastState"):
3920             selfA.StoredVariables["ForecastState"].store( Xn_predicted )
3921         if selfA._toStore("ForecastCovariance"):
3922             selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
3923         if selfA._toStore("BMA"):
3924             selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
3925         if selfA._toStore("InnovationAtCurrentState"):
3926             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
3927         if selfA._toStore("SimulatedObservationAtCurrentState") \
3928             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3929             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
3930         # ---> autres
3931         if selfA._parameters["StoreInternalVariables"] \
3932             or selfA._toStore("CostFunctionJ") \
3933             or selfA._toStore("CostFunctionJb") \
3934             or selfA._toStore("CostFunctionJo") \
3935             or selfA._toStore("CurrentOptimum") \
3936             or selfA._toStore("APosterioriCovariance"):
3937             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
3938             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
3939             J   = Jb + Jo
3940             selfA.StoredVariables["CostFunctionJb"].store( Jb )
3941             selfA.StoredVariables["CostFunctionJo"].store( Jo )
3942             selfA.StoredVariables["CostFunctionJ" ].store( J )
3943             #
3944             if selfA._toStore("IndexOfOptimum") \
3945                 or selfA._toStore("CurrentOptimum") \
3946                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
3947                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
3948                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
3949                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3950                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
3951             if selfA._toStore("IndexOfOptimum"):
3952                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
3953             if selfA._toStore("CurrentOptimum"):
3954                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
3955             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
3956                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
3957             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
3958                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
3959             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
3960                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
3961             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
3962                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
3963         if selfA._toStore("APosterioriCovariance"):
3964             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
3965         if selfA._parameters["EstimationOf"] == "Parameters" \
3966             and J < previousJMinimum:
3967             previousJMinimum    = J
3968             XaMin               = Xa
3969             if selfA._toStore("APosterioriCovariance"):
3970                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
3971     #
3972     # Stockage final supplémentaire de l'optimum en estimation de paramètres
3973     # ----------------------------------------------------------------------
3974     if selfA._parameters["EstimationOf"] == "Parameters":
3975         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
3976         selfA.StoredVariables["Analysis"].store( XaMin )
3977         if selfA._toStore("APosterioriCovariance"):
3978             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
3979         if selfA._toStore("BMA"):
3980             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
3981     #
3982     return 0
3983
3984 # ==============================================================================
3985 def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
3986     """
3987     Unscented Kalman Filter
3988     """
3989     if selfA._parameters["EstimationOf"] == "Parameters":
3990         selfA._parameters["StoreInternalVariables"] = True
3991     #
3992     L     = Xb.size
3993     Alpha = selfA._parameters["Alpha"]
3994     Beta  = selfA._parameters["Beta"]
3995     if selfA._parameters["Kappa"] == 0:
3996         if selfA._parameters["EstimationOf"] == "State":
3997             Kappa = 0
3998         elif selfA._parameters["EstimationOf"] == "Parameters":
3999             Kappa = 3 - L
4000     else:
4001         Kappa = selfA._parameters["Kappa"]
4002     Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
4003     Gamma  = math.sqrt( L + Lambda )
4004     #
4005     Ww = []
4006     Ww.append( 0. )
4007     for i in range(2*L):
4008         Ww.append( 1. / (2.*(L + Lambda)) )
4009     #
4010     Wm = numpy.array( Ww )
4011     Wm[0] = Lambda / (L + Lambda)
4012     Wc = numpy.array( Ww )
4013     Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
4014     #
4015     # Opérateurs
4016     Hm = HO["Direct"].appliedControledFormTo
4017     #
4018     if selfA._parameters["EstimationOf"] == "State":
4019         Mm = EM["Direct"].appliedControledFormTo
4020     #
4021     if CM is not None and "Tangent" in CM and U is not None:
4022         Cm = CM["Tangent"].asMatrix(Xb)
4023     else:
4024         Cm = None
4025     #
4026     # Durée d'observation et tailles
4027     if hasattr(Y,"stepnumber"):
4028         duration = Y.stepnumber()
4029         __p = numpy.cumprod(Y.shape())[-1]
4030     else:
4031         duration = 2
4032         __p = numpy.array(Y).size
4033     #
4034     # Précalcul des inversions de B et R
4035     if selfA._parameters["StoreInternalVariables"] \
4036         or selfA._toStore("CostFunctionJ") \
4037         or selfA._toStore("CostFunctionJb") \
4038         or selfA._toStore("CostFunctionJo") \
4039         or selfA._toStore("CurrentOptimum") \
4040         or selfA._toStore("APosterioriCovariance"):
4041         BI = B.getI()
4042         RI = R.getI()
4043     #
4044     __n = Xb.size
4045     #
4046     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
4047         Xn = Xb
4048         if hasattr(B,"asfullmatrix"):
4049             Pn = B.asfullmatrix(__n)
4050         else:
4051             Pn = B
4052         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4053         selfA.StoredVariables["Analysis"].store( Xb )
4054         if selfA._toStore("APosterioriCovariance"):
4055             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4056     elif selfA._parameters["nextStep"]:
4057         Xn = selfA._getInternalState("Xn")
4058         Pn = selfA._getInternalState("Pn")
4059     #
4060     if selfA._parameters["EstimationOf"] == "Parameters":
4061         XaMin            = Xn
4062         previousJMinimum = numpy.finfo(float).max
4063     #
4064     for step in range(duration-1):
4065         if hasattr(Y,"store"):
4066             Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
4067         else:
4068             Ynpu = numpy.ravel( Y ).reshape((__p,1))
4069         #
4070         if U is not None:
4071             if hasattr(U,"store") and len(U)>1:
4072                 Un = numpy.ravel( U[step] ).reshape((-1,1))
4073             elif hasattr(U,"store") and len(U)==1:
4074                 Un = numpy.ravel( U[0] ).reshape((-1,1))
4075             else:
4076                 Un = numpy.ravel( U ).reshape((-1,1))
4077         else:
4078             Un = None
4079         #
4080         Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
4081         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
4082         nbSpts = 2*Xn.size+1
4083         #
4084         XEtnnp = []
4085         for point in range(nbSpts):
4086             if selfA._parameters["EstimationOf"] == "State":
4087                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
4088                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
4089                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
4090                     XEtnnpi = XEtnnpi + Cm @ Un
4091             elif selfA._parameters["EstimationOf"] == "Parameters":
4092                 # --- > Par principe, M = Id, Q = 0
4093                 XEtnnpi = Xnp[:,point]
4094             XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
4095         XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
4096         #
4097         Xncm = ( XEtnnp * Wm ).sum(axis=1)
4098         #
4099         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
4100         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
4101         for point in range(nbSpts):
4102             Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
4103         #
4104         Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
4105         #
4106         Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
4107         #
4108         Ynnp = []
4109         for point in range(nbSpts):
4110             if selfA._parameters["EstimationOf"] == "State":
4111                 Ynnpi = Hm( (Xnnp[:,point], None) )
4112             elif selfA._parameters["EstimationOf"] == "Parameters":
4113                 Ynnpi = Hm( (Xnnp[:,point], Un) )
4114             Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
4115         Ynnp = numpy.concatenate( Ynnp, axis=1 )
4116         #
4117         Yncm = ( Ynnp * Wm ).sum(axis=1)
4118         #
4119         Pyyn = R
4120         Pxyn = 0.
4121         for point in range(nbSpts):
4122             Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4123             Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
4124         #
4125         _Innovation  = Ynpu - Yncm.reshape((-1,1))
4126         if selfA._parameters["EstimationOf"] == "Parameters":
4127             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
4128                 _Innovation = _Innovation - Cm @ Un
4129         #
4130         Kn = Pxyn * Pyyn.I
4131         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
4132         Pn = Pnm - Kn * Pyyn * Kn.T
4133         #
4134         Xa = Xn # Pointeurs
4135         #--------------------------
4136         selfA._setInternalState("Xn", Xn)
4137         selfA._setInternalState("Pn", Pn)
4138         #--------------------------
4139         #
4140         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4141         # ---> avec analysis
4142         selfA.StoredVariables["Analysis"].store( Xa )
4143         if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
4144             selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
4145         if selfA._toStore("InnovationAtCurrentAnalysis"):
4146             selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
4147         # ---> avec current state
4148         if selfA._parameters["StoreInternalVariables"] \
4149             or selfA._toStore("CurrentState"):
4150             selfA.StoredVariables["CurrentState"].store( Xn )
4151         if selfA._toStore("ForecastState"):
4152             selfA.StoredVariables["ForecastState"].store( Xncm )
4153         if selfA._toStore("ForecastCovariance"):
4154             selfA.StoredVariables["ForecastCovariance"].store( Pnm )
4155         if selfA._toStore("BMA"):
4156             selfA.StoredVariables["BMA"].store( Xncm - Xa )
4157         if selfA._toStore("InnovationAtCurrentState"):
4158             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4159         if selfA._toStore("SimulatedObservationAtCurrentState") \
4160             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4161             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
4162         # ---> autres
4163         if selfA._parameters["StoreInternalVariables"] \
4164             or selfA._toStore("CostFunctionJ") \
4165             or selfA._toStore("CostFunctionJb") \
4166             or selfA._toStore("CostFunctionJo") \
4167             or selfA._toStore("CurrentOptimum") \
4168             or selfA._toStore("APosterioriCovariance"):
4169             Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
4170             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4171             J   = Jb + Jo
4172             selfA.StoredVariables["CostFunctionJb"].store( Jb )
4173             selfA.StoredVariables["CostFunctionJo"].store( Jo )
4174             selfA.StoredVariables["CostFunctionJ" ].store( J )
4175             #
4176             if selfA._toStore("IndexOfOptimum") \
4177                 or selfA._toStore("CurrentOptimum") \
4178                 or selfA._toStore("CostFunctionJAtCurrentOptimum") \
4179                 or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
4180                 or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
4181                 or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4182                 IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4183             if selfA._toStore("IndexOfOptimum"):
4184                 selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4185             if selfA._toStore("CurrentOptimum"):
4186                 selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
4187             if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4188                 selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
4189             if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4190                 selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4191             if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4192                 selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4193             if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4194                 selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4195         if selfA._toStore("APosterioriCovariance"):
4196             selfA.StoredVariables["APosterioriCovariance"].store( Pn )
4197         if selfA._parameters["EstimationOf"] == "Parameters" \
4198             and J < previousJMinimum:
4199             previousJMinimum    = J
4200             XaMin               = Xa
4201             if selfA._toStore("APosterioriCovariance"):
4202                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
4203     #
4204     # Stockage final supplémentaire de l'optimum en estimation de paramètres
4205     # ----------------------------------------------------------------------
4206     if selfA._parameters["EstimationOf"] == "Parameters":
4207         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
4208         selfA.StoredVariables["Analysis"].store( XaMin )
4209         if selfA._toStore("APosterioriCovariance"):
4210             selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
4211         if selfA._toStore("BMA"):
4212             selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
4213     #
4214     return 0
4215
4216 # ==============================================================================
4217 def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
4218     """
4219     3DVAR variational analysis with no inversion of B
4220     """
4221     #
4222     # Initialisations
4223     # ---------------
4224     Hm = HO["Direct"].appliedTo
4225     Ha = HO["Adjoint"].appliedInXTo
4226     #
4227     BT = B.getT()
4228     RI = R.getI()
4229     #
4230     Xini = numpy.zeros(Xb.shape)
4231     #
4232     # Définition de la fonction-coût
4233     # ------------------------------
4234     def CostFunction(v):
4235         _V = numpy.ravel( v ).reshape((-1,1))
4236         _X = Xb + B * _V
4237         if selfA._parameters["StoreInternalVariables"] or \
4238             selfA._toStore("CurrentState") or \
4239             selfA._toStore("CurrentOptimum"):
4240             selfA.StoredVariables["CurrentState"].store( _X )
4241         _HX = Hm( _X )
4242         _HX = numpy.ravel( _HX ).reshape((-1,1))
4243         _Innovation = Y - _HX
4244         if selfA._toStore("SimulatedObservationAtCurrentState") or \
4245             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4246             selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
4247         if selfA._toStore("InnovationAtCurrentState"):
4248             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
4249         #
4250         Jb  = float( 0.5 * _V.T * (BT * _V) )
4251         Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
4252         J   = Jb + Jo
4253         #
4254         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
4255         selfA.StoredVariables["CostFunctionJb"].store( Jb )
4256         selfA.StoredVariables["CostFunctionJo"].store( Jo )
4257         selfA.StoredVariables["CostFunctionJ" ].store( J )
4258         if selfA._toStore("IndexOfOptimum") or \
4259             selfA._toStore("CurrentOptimum") or \
4260             selfA._toStore("CostFunctionJAtCurrentOptimum") or \
4261             selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
4262             selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
4263             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4264             IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4265         if selfA._toStore("IndexOfOptimum"):
4266             selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
4267         if selfA._toStore("CurrentOptimum"):
4268             selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
4269         if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4270             selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
4271         if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
4272             selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
4273         if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
4274             selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
4275         if selfA._toStore("CostFunctionJAtCurrentOptimum"):
4276             selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
4277         return J
4278     #
4279     def GradientOfCostFunction(v):
4280         _V = v.reshape((-1,1))
4281         _X = Xb + (B @ _V).reshape((-1,1))
4282         _HX     = Hm( _X ).reshape((-1,1))
4283         GradJb  = BT * _V
4284         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
4285         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
4286         return GradJ
4287     #
4288     # Minimisation de la fonctionnelle
4289     # --------------------------------
4290     nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
4291     #
4292     if selfA._parameters["Minimizer"] == "LBFGSB":
4293         if "0.19" <= scipy.version.version <= "1.1.0":
4294             import lbfgsbhlt as optimiseur
4295         else:
4296             import scipy.optimize as optimiseur
4297         Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
4298             func        = CostFunction,
4299             x0          = Xini,
4300             fprime      = GradientOfCostFunction,
4301             args        = (),
4302             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4303             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
4304             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
4305             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4306             iprint      = selfA._parameters["optiprint"],
4307             )
4308         nfeval = Informations['funcalls']
4309         rc     = Informations['warnflag']
4310     elif selfA._parameters["Minimizer"] == "TNC":
4311         Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
4312             func        = CostFunction,
4313             x0          = Xini,
4314             fprime      = GradientOfCostFunction,
4315             args        = (),
4316             bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
4317             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
4318             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
4319             ftol        = selfA._parameters["CostDecrementTolerance"],
4320             messages    = selfA._parameters["optmessages"],
4321             )
4322     elif selfA._parameters["Minimizer"] == "CG":
4323         Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
4324             f           = CostFunction,
4325             x0          = Xini,
4326             fprime      = GradientOfCostFunction,
4327             args        = (),
4328             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4329             gtol        = selfA._parameters["GradientNormTolerance"],
4330             disp        = selfA._parameters["optdisp"],
4331             full_output = True,
4332             )
4333     elif selfA._parameters["Minimizer"] == "NCG":
4334         Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
4335             f           = CostFunction,
4336             x0          = Xini,
4337             fprime      = GradientOfCostFunction,
4338             args        = (),
4339             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4340             avextol     = selfA._parameters["CostDecrementTolerance"],
4341             disp        = selfA._parameters["optdisp"],
4342             full_output = True,
4343             )
4344     elif selfA._parameters["Minimizer"] == "BFGS":
4345         Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
4346             f           = CostFunction,
4347             x0          = Xini,
4348             fprime      = GradientOfCostFunction,
4349             args        = (),
4350             maxiter     = selfA._parameters["MaximumNumberOfSteps"],
4351             gtol        = selfA._parameters["GradientNormTolerance"],
4352             disp        = selfA._parameters["optdisp"],
4353             full_output = True,
4354             )
4355     else:
4356         raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
4357     #
4358     IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
4359     MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
4360     #
4361     # Correction pour pallier a un bug de TNC sur le retour du Minimum
4362     # ----------------------------------------------------------------
4363     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
4364         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
4365     else:
4366         Minimum = Xb + B * Minimum.reshape((-1,1)) # Pas @
4367     #
4368     Xa = Minimum
4369     #--------------------------
4370     #
4371     selfA.StoredVariables["Analysis"].store( Xa )
4372     #
4373     if selfA._toStore("OMA") or \
4374         selfA._toStore("SigmaObs2") or \
4375         selfA._toStore("SimulationQuantiles") or \
4376         selfA._toStore("SimulatedObservationAtOptimum"):
4377         if selfA._toStore("SimulatedObservationAtCurrentState"):
4378             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
4379         elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
4380             HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
4381         else:
4382             HXa = Hm( Xa )
4383     #
4384     if selfA._toStore("APosterioriCovariance") or \
4385         selfA._toStore("SimulationQuantiles") or \
4386         selfA._toStore("JacobianMatrixAtOptimum") or \
4387         selfA._toStore("KalmanGainAtOptimum"):
4388         HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
4389         HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
4390     if selfA._toStore("APosterioriCovariance") or \
4391         selfA._toStore("SimulationQuantiles") or \
4392         selfA._toStore("KalmanGainAtOptimum"):
4393         HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
4394         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
4395     if selfA._toStore("APosterioriCovariance") or \
4396         selfA._toStore("SimulationQuantiles"):
4397         BI = B.getI()
4398         A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
4399     if selfA._toStore("APosterioriCovariance"):
4400         selfA.StoredVariables["APosterioriCovariance"].store( A )
4401     if selfA._toStore("JacobianMatrixAtOptimum"):
4402         selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
4403     if selfA._toStore("KalmanGainAtOptimum"):
4404         if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
4405         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
4406         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
4407     #
4408     # Calculs et/ou stockages supplémentaires
4409     # ---------------------------------------
4410     if selfA._toStore("Innovation") or \
4411         selfA._toStore("SigmaObs2") or \
4412         selfA._toStore("MahalanobisConsistency") or \
4413         selfA._toStore("OMB"):
4414         d  = Y - HXb
4415     if selfA._toStore("Innovation"):
4416         selfA.StoredVariables["Innovation"].store( d )
4417     if selfA._toStore("BMA"):
4418         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
4419     if selfA._toStore("OMA"):
4420         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
4421     if selfA._toStore("OMB"):
4422         selfA.StoredVariables["OMB"].store( d )
4423     if selfA._toStore("SigmaObs2"):
4424         TraceR = R.trace(Y.size)
4425         selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
4426     if selfA._toStore("MahalanobisConsistency"):
4427         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
4428     if selfA._toStore("SimulationQuantiles"):
4429         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
4430     if selfA._toStore("SimulatedObservationAtBackground"):
4431         selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
4432     if selfA._toStore("SimulatedObservationAtOptimum"):
4433         selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
4434     #
4435     return 0
4436
4437 # ==============================================================================
4438 if __name__ == "__main__":
4439     print('\n AUTODIAGNOSTIC\n')