Minor source update for OM compatibility
[modules/adao.git] / src/daComposant/daCore/NumericObjects.py
index aa6c683592e0c1a146b1ed356675970ae5d8defd..5903cddbced50da59c738fa77ad20e791ad116ec 100644
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright (C) 2008-2022 EDF R&D
+# Copyright (C) 2008-2024 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -25,10 +25,10 @@ __doc__ = """
 """
 __author__ = "Jean-Philippe ARGAUD"
 
-import os, time, copy, types, sys, logging
-import math, numpy, scipy, scipy.optimize, scipy.version
+import os, copy, types, sys, logging, math, numpy, scipy, itertools, warnings
+import scipy.linalg  # Py3.6
 from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
-from daCore.PlatformInfo import PlatformInfo
+from daCore.PlatformInfo import PlatformInfo, vt, vfloat
 mpr = PlatformInfo().MachinePrecision()
 mfp = PlatformInfo().MaximumPrecision()
 # logging.getLogger().setLevel(logging.DEBUG)
@@ -37,11 +37,13 @@ mfp = PlatformInfo().MaximumPrecision()
 def ExecuteFunction( triplet ):
     assert len(triplet) == 3, "Incorrect number of arguments"
     X, xArgs, funcrepr = triplet
-    __X = numpy.ravel( X ).reshape((-1,1))
-    __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
+    __X = numpy.ravel( X ).reshape((-1, 1))
+    __sys_path_tmp = sys.path
+    sys.path.insert(0, funcrepr["__userFunction__path"])
     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
-    __fonction = getattr(__module,funcrepr["__userFunction__name"])
-    sys.path = __sys_path_tmp ; del __sys_path_tmp
+    __fonction = getattr(__module, funcrepr["__userFunction__name"])
+    sys.path = __sys_path_tmp
+    del __sys_path_tmp
     if isinstance(xArgs, dict):
         __HX  = __fonction( __X, **xArgs )
     else:
@@ -59,27 +61,35 @@ class FDApproximation(object):
     "dX" qui sera multiplié par "increment" (donc en %), et on effectue de DF
     centrées si le booléen "centeredDF" est vrai.
     """
+    __slots__ = (
+        "__name", "__extraArgs", "__mpEnabled", "__mpWorkers", "__mfEnabled",
+        "__rmEnabled", "__avoidRC", "__tolerBP", "__centeredDF", "__lengthRJ",
+        "__listJPCP", "__listJPCI", "__listJPCR", "__listJPPN", "__listJPIN",
+        "__userOperator", "__userFunction", "__increment", "__pool", "__dX",
+        "__userFunction__name", "__userFunction__modl", "__userFunction__path",
+    )
+
     def __init__(self,
-            name                  = "FDApproximation",
-            Function              = None,
-            centeredDF            = False,
-            increment             = 0.01,
-            dX                    = None,
-            extraArguments        = None,
-            reducingMemoryUse     = False,
-            avoidingRedundancy    = True,
-            toleranceInRedundancy = 1.e-18,
-            lenghtOfRedundancy    = -1,
-            mpEnabled             = False,
-            mpWorkers             = None,
-            mfEnabled             = False,
-            ):
+                 name                  = "FDApproximation",
+                 Function              = None,
+                 centeredDF            = False,
+                 increment             = 0.01,
+                 dX                    = None,
+                 extraArguments        = None,
+                 reducingMemoryUse     = False,
+                 avoidingRedundancy    = True,
+                 toleranceInRedundancy = 1.e-18,
+                 lengthOfRedundancy    = -1,
+                 mpEnabled             = False,
+                 mpWorkers             = None,
+                 mfEnabled             = False ):
+        #
         self.__name = str(name)
         self.__extraArgs = extraArguments
         #
         if mpEnabled:
             try:
-                import multiprocessing
+                import multiprocessing  # noqa: F401
                 self.__mpEnabled = True
             except ImportError:
                 self.__mpEnabled = False
@@ -88,7 +98,7 @@ class FDApproximation(object):
         self.__mpWorkers = mpWorkers
         if self.__mpWorkers is not None and self.__mpWorkers < 1:
             self.__mpWorkers = None
-        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
+        logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled, self.__mpWorkers))
         #
         self.__mfEnabled = bool(mfEnabled)
         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
@@ -99,12 +109,12 @@ class FDApproximation(object):
         if avoidingRedundancy:
             self.__avoidRC = True
             self.__tolerBP = float(toleranceInRedundancy)
-            self.__lenghtRJ = int(lenghtOfRedundancy)
-            self.__listJPCP = [] # Jacobian Previous Calculated Points
-            self.__listJPCI = [] # Jacobian Previous Calculated Increment
-            self.__listJPCR = [] # Jacobian Previous Calculated Results
-            self.__listJPPN = [] # Jacobian Previous Calculated Point Norms
-            self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
+            self.__lengthRJ = int(lengthOfRedundancy)
+            self.__listJPCP = []  # Jacobian Previous Calculated Points
+            self.__listJPCI = []  # Jacobian Previous Calculated Increment
+            self.__listJPCR = []  # Jacobian Previous Calculated Results
+            self.__listJPPN = []  # Jacobian Previous Calculated Point Norms
+            self.__listJPIN = []  # Jacobian Previous Calculated Increment Norms
         else:
             self.__avoidRC = False
         logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
@@ -112,38 +122,53 @@ class FDApproximation(object):
             logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
         #
         if self.__mpEnabled:
-            if isinstance(Function,types.FunctionType):
+            if isinstance(Function, types.FunctionType):
                 logging.debug("FDA Calculs en multiprocessing : FunctionType")
                 self.__userFunction__name = Function.__name__
                 try:
-                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
-                except:
+                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
+                except Exception:
                     mod = os.path.abspath(Function.__globals__['__file__'])
                 if not os.path.isfile(mod):
                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
-                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
+                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                 self.__userFunction__path = os.path.dirname(mod)
                 del mod
-                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
-                self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
-            elif isinstance(Function,types.MethodType):
+                self.__userOperator = Operator(
+                    name                 = self.__name,
+                    fromMethod           = Function,
+                    avoidingRedundancy   = self.__avoidRC,
+                    inputAsMultiFunction = self.__mfEnabled,
+                    extraArguments       = self.__extraArgs )
+                self.__userFunction = self.__userOperator.appliedTo  # Pour le calcul Direct
+            elif isinstance(Function, types.MethodType):
                 logging.debug("FDA Calculs en multiprocessing : MethodType")
                 self.__userFunction__name = Function.__name__
                 try:
-                    mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename'])
-                except:
+                    mod = os.path.join(Function.__globals__['filepath'], Function.__globals__['filename'])
+                except Exception:
                     mod = os.path.abspath(Function.__func__.__globals__['__file__'])
                 if not os.path.isfile(mod):
                     raise ImportError("No user defined function or method found with the name %s"%(mod,))
-                self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
+                self.__userFunction__modl = os.path.basename(mod).replace('.pyc', '').replace('.pyo', '').replace('.py', '')
                 self.__userFunction__path = os.path.dirname(mod)
                 del mod
-                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
-                self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct
+                self.__userOperator = Operator(
+                    name                 = self.__name,
+                    fromMethod           = Function,
+                    avoidingRedundancy   = self.__avoidRC,
+                    inputAsMultiFunction = self.__mfEnabled,
+                    extraArguments       = self.__extraArgs )
+                self.__userFunction = self.__userOperator.appliedTo  # Pour le calcul Direct
             else:
                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
         else:
-            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
+            self.__userOperator = Operator(
+                name                 = self.__name,
+                fromMethod           = Function,
+                avoidingRedundancy   = self.__avoidRC,
+                inputAsMultiFunction = self.__mfEnabled,
+                extraArguments       = self.__extraArgs )
             self.__userFunction = self.__userOperator.appliedTo
         #
         self.__centeredDF = bool(centeredDF)
@@ -157,19 +182,20 @@ class FDApproximation(object):
             self.__dX     = numpy.ravel( dX )
 
     # ---------------------------------------------------------
-    def __doublon__(self, e, l, n, v=None):
+    def __doublon__(self, __e, __l, __n, __v=None):
         __ac, __iac = False, -1
-        for i in range(len(l)-1,-1,-1):
-            if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]:
+        for i in range(len(__l) - 1, -1, -1):
+            if numpy.linalg.norm(__e - __l[i]) < self.__tolerBP * __n[i]:
                 __ac, __iac = True, i
-                if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac))
+                if __v is not None:
+                    logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(__v, __iac))
                 break
         return __ac, __iac
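The redundancy avoidance above reduces to comparing the candidate point against each stored point, with a tolerance relative to the stored norms. A minimal standalone sketch of that test (plain NumPy, made-up data; not the class machinery itself):

    import numpy

    def already_computed(e, previous_points, previous_norms, tol=1.e-18):
        "Return (found, index) when e matches a stored point within tol * its stored norm."
        for i in range(len(previous_points) - 1, -1, -1):
            if numpy.linalg.norm(e - previous_points[i]) < tol * previous_norms[i]:
                return True, i
        return False, -1

    pts   = [numpy.array([1., 2.]), numpy.array([3., 4.])]
    norms = [numpy.linalg.norm(p) for p in pts]
    print(already_computed(numpy.array([3., 4.]), pts, norms))  # (True, 1)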
 
     # ---------------------------------------------------------
     def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
         "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
-        if not isinstance(__LMatrix, (list,tuple)):
+        if not isinstance(__LMatrix, (list, tuple)):
             raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
         if __dotWith is not None:
             __Idwx = numpy.ravel( __dotWith )
@@ -183,7 +209,7 @@ class FDApproximation(object):
             assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
             __Produit = numpy.zeros(len(__LMatrix))
             for i, col in enumerate(__LMatrix):
-                __Produit[i] = float( _Idwy @ col)
+                __Produit[i] = vfloat( _Idwy @ col)
             return __Produit
         else:
             __Produit = None
@@ -235,7 +261,7 @@ class FDApproximation(object):
         logging.debug("FDA   Incrément de............: %s*X"%float(self.__increment))
         logging.debug("FDA   Approximation centrée...: %s"%(self.__centeredDF))
         #
-        if X is None or len(X)==0:
+        if X is None or len(X) == 0:
             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
         #
         _X = numpy.ravel( X )
@@ -256,7 +282,7 @@ class FDApproximation(object):
         #
         __alreadyCalculated  = False
         if self.__avoidRC:
-            __bidon, __alreadyCalculatedP = self.__doublon__(_X,  self.__listJPCP, self.__listJPPN, None)
+            __bidon, __alreadyCalculatedP = self.__doublon__( _X, self.__listJPCP, self.__listJPPN, None)
             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                 __alreadyCalculated, __i = True, __alreadyCalculatedP
@@ -267,7 +293,7 @@ class FDApproximation(object):
             _Jacobienne = self.__listJPCR[__i]
             logging.debug("FDA Fin du calcul de la Jacobienne")
             if dotWith is not None:
-                return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
+                return numpy.dot(  _Jacobienne, numpy.ravel( dotWith ))
             elif dotTWith is not None:
                 return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
         else:
@@ -276,9 +302,9 @@ class FDApproximation(object):
                 #
                 if self.__mpEnabled and not self.__mfEnabled:
                     funcrepr = {
-                        "__userFunction__path" : self.__userFunction__path,
-                        "__userFunction__modl" : self.__userFunction__modl,
-                        "__userFunction__name" : self.__userFunction__name,
+                        "__userFunction__path": self.__userFunction__path,
+                        "__userFunction__modl": self.__userFunction__modl,
+                        "__userFunction__name": self.__userFunction__name,
                     }
                     _jobs = []
                     for i in range( len(_dX) ):
@@ -288,7 +314,7 @@ class FDApproximation(object):
                         _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
-                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
+                        _jobs.append( ( _X_plus_dXi, self.__extraArgs, funcrepr) )
                         _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                     #
                     import multiprocessing
@@ -299,7 +325,7 @@ class FDApproximation(object):
                     #
                     _Jacobienne  = []
                     for i in range( len(_dX) ):
-                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
+                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2 * i] - _HX_plusmoins_dX[2 * i + 1] ) / (2. * _dX[i]) )
                     #
                 elif self.__mfEnabled:
                     _xserie = []
@@ -314,10 +340,10 @@ class FDApproximation(object):
                         _xserie.append( _X_moins_dXi )
                     #
                     _HX_plusmoins_dX = self.DirectOperator( _xserie )
-                     #
+                    #
                     _Jacobienne  = []
                     for i in range( len(_dX) ):
-                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) )
+                        _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2 * i] - _HX_plusmoins_dX[2 * i + 1] ) / (2. * _dX[i]) )
                     #
                 else:
                     _Jacobienne  = []
@@ -331,15 +357,15 @@ class FDApproximation(object):
                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
                         _HX_moins_dXi   = self.DirectOperator( _X_moins_dXi )
                         #
-                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2.*_dXi) )
+                        _Jacobienne.append( numpy.ravel( _HX_plus_dXi - _HX_moins_dXi ) / (2. * _dXi) )
                 #
             else:
                 #
                 if self.__mpEnabled and not self.__mfEnabled:
                     funcrepr = {
-                        "__userFunction__path" : self.__userFunction__path,
-                        "__userFunction__modl" : self.__userFunction__modl,
-                        "__userFunction__name" : self.__userFunction__name,
+                        "__userFunction__path": self.__userFunction__path,
+                        "__userFunction__modl": self.__userFunction__modl,
+                        "__userFunction__name": self.__userFunction__name,
                     }
                     _jobs = []
                     _jobs.append( (_X, self.__extraArgs, funcrepr) )
@@ -377,7 +403,7 @@ class FDApproximation(object):
                     _Jacobienne = []
                     for i in range( len(_dX) ):
                         _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) )
-                   #
+                    #
                 else:
                     _Jacobienne  = []
                     _HX = self.DirectOperator( _X )
@@ -397,8 +423,9 @@ class FDApproximation(object):
             if __Produit is None or self.__avoidRC:
                 _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
                 if self.__avoidRC:
-                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
-                    while len(self.__listJPCP) > self.__lenghtRJ:
+                    if self.__lengthRJ < 0:
+                        self.__lengthRJ = 2 * _X.size
+                    while len(self.__listJPCP) > self.__lengthRJ:
                         self.__listJPCP.pop(0)
                         self.__listJPCI.pop(0)
                         self.__listJPCR.pop(0)
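Stripped of the multiprocessing, multifunction and caching branches above, the centered variant builds each Jacobian column as (H(X+dXi) - H(X-dXi)) / (2*dXi). A self-contained sketch with a toy operator (illustrative only; it mirrors the relative-increment default):

    import numpy

    def H(x):
        "Toy nonlinear observation operator, used only for this illustration."
        return numpy.array([x[0]**2 + x[1], numpy.sin(x[1])])

    def centered_fd_jacobian(func, X, increment=0.01):
        "Columns are (H(X+dXi) - H(X-dXi)) / (2*dXi), one per component of X."
        X  = numpy.ravel(numpy.asarray(X, dtype=float))
        dX = increment * X
        cols = []
        for i, dXi in enumerate(dX):
            Xp, Xm = X.copy(), X.copy()
            Xp[i] = X[i] + dXi
            Xm[i] = X[i] - dXi
            cols.append(numpy.ravel(func(Xp) - func(Xm)) / (2. * dXi))
        return numpy.transpose(numpy.vstack(cols))

    print(centered_fd_jacobian(H, [1., 2.]))  # close to [[2., 1.], [0., cos(2.)]]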
@@ -436,15 +463,19 @@ class FDApproximation(object):
             # Calcul de la forme matricielle si le second argument est None
             # -------------------------------------------------------------
             _Jacobienne = self.TangentMatrix( X )
-            if self.__mfEnabled: return [_Jacobienne,]
-            else:                return _Jacobienne
+            if self.__mfEnabled:
+                return [_Jacobienne,]
+            else:
+                return _Jacobienne
         else:
             #
             # Calcul de la valeur linéarisée de H en X appliqué à dX
             # ------------------------------------------------------
             _HtX = self.TangentMatrix( X, dotWith = dX )
-            if self.__mfEnabled: return [_HtX,]
-            else:                return _HtX
+            if self.__mfEnabled:
+                return [_HtX,]
+            else:
+                return _HtX
 
     # ---------------------------------------------------------
     def AdjointOperator(self, paire, **extraArgs ):
@@ -467,95 +498,132 @@ class FDApproximation(object):
             # Calcul de la forme matricielle si le second argument est None
             # -------------------------------------------------------------
             _JacobienneT = self.TangentMatrix( X ).T
-            if self.__mfEnabled: return [_JacobienneT,]
-            else:                return _JacobienneT
+            if self.__mfEnabled:
+                return [_JacobienneT,]
+            else:
+                return _JacobienneT
         else:
             #
             # Calcul de la valeur de l'adjoint en X appliqué à Y
             # --------------------------------------------------
             _HaY = self.TangentMatrix( X, dotTWith = Y )
-            if self.__mfEnabled: return [_HaY,]
-            else:                return _HaY
+            if self.__mfEnabled:
+                return [_HaY,]
+            else:
+                return _HaY
+
+# ==============================================================================
+def SetInitialDirection( __Direction = [], __Amplitude = 1., __Position = None ):
+    "Établit ou élabore une direction avec une amplitude"
+    #
+    if len(__Direction) == 0 and __Position is None:
+        raise ValueError("If initial direction is void, current position has to be given")
+    if abs(float(__Amplitude)) < mpr:
+        raise ValueError("Amplitude of perturbation can not be zero")
+    #
+    if len(__Direction) > 0:
+        __dX0 = numpy.asarray(__Direction)
+    else:
+        __dX0 = []
+        __X0 = numpy.ravel(numpy.asarray(__Position))
+        __mX0 = numpy.mean( __X0, dtype=mfp )
+        if abs(__mX0) < 2 * mpr:
+            __mX0 = 1.  # Évite le problème de position nulle
+        for v in __X0:
+            if abs(v) > 1.e-8:
+                __dX0.append( numpy.random.normal(0., abs(v)) )
+            else:
+                __dX0.append( numpy.random.normal(0., __mX0) )
+    #
+    __dX0 = numpy.asarray(__dX0, float)  # Évite le problème d'array de taille 1
+    __dX0 = numpy.ravel( __dX0 )         # Redresse les vecteurs
+    __dX0 = float(__Amplitude) * __dX0
+    #
+    return __dX0
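A short usage sketch of the two calling modes (hypothetical values; assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import SetInitialDirection

    # Mode 1: an explicit direction, simply rescaled by the amplitude
    d1 = SetInitialDirection(__Direction=[1., 0., -1.], __Amplitude=0.5)
    print(d1)  # [ 0.5  0.  -0.5]

    # Mode 2: no direction given, one is drawn at random around the current position
    d2 = SetInitialDirection(__Direction=[], __Amplitude=0.5, __Position=[2., 4., 8.])
    print(d2.shape)  # (3,)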
 
 # ==============================================================================
-def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
-    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+def EnsembleOfCenteredPerturbations( __bgCenter, __bgCovariance, __nbMembers ):
+    "Génération d'un ensemble de taille __nbMembers-1 d'états aléatoires centrés"
     #
-    _bgcenter = numpy.ravel(_bgcenter)[:,None]
-    if _nbmembers < 1:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
+    __bgCenter = numpy.ravel(__bgCenter)[:, None]
+    if __nbMembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
     #
-    if _bgcovariance is None:
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    if __bgCovariance is None:
+        _Perturbations = numpy.tile( __bgCenter, __nbMembers)
     else:
-        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
+        _Z = numpy.random.multivariate_normal(numpy.zeros(__bgCenter.size), __bgCovariance, size=__nbMembers).T
+        _Perturbations = numpy.tile( __bgCenter, __nbMembers) + _Z
     #
     return _Perturbations
 
 # ==============================================================================
-def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
-    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+def EnsembleOfBackgroundPerturbations(
+        __bgCenter,
+        __bgCovariance,
+        __nbMembers,
+        __withSVD = True ):
+    "Génération d'un ensemble de taille __nbMembers-1 d'états aléatoires centrés"
     def __CenteredRandomAnomalies(Zr, N):
         """
         Génère une matrice de N anomalies aléatoires centrées sur Zr selon les
         notes manuscrites de MB et conforme au code de PS avec eps = -1
         """
         eps = -1
-        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
-        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
-        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
-        Q = numpy.dot(Q,R)
-        Zr = numpy.dot(Q,Zr)
+        Q = numpy.identity(N - 1) - numpy.ones((N - 1, N - 1)) / numpy.sqrt(N) / (numpy.sqrt(N) - eps)
+        Q = numpy.concatenate((Q, [eps * numpy.ones(N - 1) / numpy.sqrt(N)]), axis=0)
+        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N - 1, N - 1)))
+        Q = numpy.dot(Q, R)
+        Zr = numpy.dot(Q, Zr)
         return Zr.T
     #
-    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
-    if _nbmembers < 1:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
-    if _bgcovariance is None:
-        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    __bgCenter = numpy.ravel(__bgCenter).reshape((-1, 1))
+    if __nbMembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(__nbMembers),))
+    if __bgCovariance is None:
+        _Perturbations = numpy.tile( __bgCenter, __nbMembers)
     else:
-        if _withSVD:
-            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
-            _nbctl = _bgcenter.size
-            if _nbmembers > _nbctl:
+        if __withSVD:
+            _U, _s, _V = numpy.linalg.svd(__bgCovariance, full_matrices=False)
+            _nbctl = __bgCenter.size
+            if __nbMembers > _nbctl:
                 _Z = numpy.concatenate((numpy.dot(
                     numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
-                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
+                    numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers - 1 - _nbctl)), axis = 0)
             else:
-                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
-            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-            _Perturbations = _bgcenter + _Zca
+                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:__nbMembers - 1])), _V[:__nbMembers - 1])
+            _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
+            _Perturbations = __bgCenter + _Zca
         else:
-            if max(abs(_bgcovariance.flatten())) > 0:
-                _nbctl = _bgcenter.size
-                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
-                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-                _Perturbations = _bgcenter + _Zca
+            if max(abs(__bgCovariance.flatten())) > 0:
+                _nbctl = __bgCenter.size
+                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl), __bgCovariance, __nbMembers - 1)
+                _Zca = __CenteredRandomAnomalies(_Z, __nbMembers)
+                _Perturbations = __bgCenter + _Zca
             else:
-                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+                _Perturbations = numpy.tile( __bgCenter, __nbMembers)
     #
     return _Perturbations
 
 # ==============================================================================
 def EnsembleMean( __Ensemble ):
     "Renvoie la moyenne empirique d'un ensemble"
-    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
+    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1, 1))
 
 # ==============================================================================
-def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
+def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1. ):
     "Renvoie les anomalies centrées à partir d'un ensemble"
     if __OptMean is None:
         __Em = EnsembleMean( __Ensemble )
     else:
-        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
+        __Em = numpy.ravel( __OptMean ).reshape((-1, 1))
     #
     return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
 
 # ==============================================================================
-def EnsembleErrorCovariance( __Ensemble, __quick = False ):
+def EnsembleErrorCovariance( __Ensemble, __Quick = False ):
     "Renvoie l'estimation empirique de la covariance d'ensemble"
-    if __quick:
+    if __Quick:
         # Covariance rapide mais rarement définie positive
         __Covariance = numpy.cov( __Ensemble )
     else:
@@ -563,27 +631,172 @@ def EnsembleErrorCovariance( __Ensemble, __quick = False ):
         __n, __m = numpy.asarray( __Ensemble ).shape
         __Anomalies = EnsembleOfAnomalies( __Ensemble )
         # Estimation empirique
-        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
+        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m - 1)
         # Assure la symétrie
         __Covariance = ( __Covariance + __Covariance.T ) * 0.5
         # Assure la positivité
-        __epsilon    = mpr*numpy.trace( __Covariance )
+        __epsilon    = mpr * numpy.trace( __Covariance )
         __Covariance = __Covariance + __epsilon * numpy.identity(__n)
     #
     return __Covariance
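Together, EnsembleMean, EnsembleOfAnomalies and EnsembleErrorCovariance implement the usual sample estimate A A^T / (m-1), followed by a symmetrisation and a small positivity shift. A quick check on a toy ensemble, states stored as columns (assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import EnsembleMean, EnsembleOfAnomalies, EnsembleErrorCovariance

    E  = numpy.array([[1., 2., 3.],
                      [4., 6., 8.]])   # 2 state variables, m = 3 members in columns
    Xm = EnsembleMean(E)               # column vector [[2.], [6.]]
    An = EnsembleOfAnomalies(E)        # E - Xm, shape (2, 3)
    C  = EnsembleErrorCovariance(E)    # An @ An.T / (m - 1), symmetrised and shifted
    print(numpy.allclose(C, numpy.cov(E)))  # True, up to the tiny positivity shift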
 
 # ==============================================================================
-def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
+def SingularValuesEstimation( __Ensemble, __Using = "SVDVALS"):
+    "Renvoie les valeurs singulières de l'ensemble et leur carré"
+    if __Using == "SVDVALS":  # Recommandé
+        __sv   = scipy.linalg.svdvals( __Ensemble )
+        __svsq = __sv**2
+    elif __Using == "SVD":
+        _, __sv, _ = numpy.linalg.svd( __Ensemble )
+        __svsq = __sv**2
+    elif __Using == "EIG":  # Lent
+        __eva, __eve = numpy.linalg.eig( __Ensemble @ __Ensemble.T )
+        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
+        __sv   = numpy.sqrt( __svsq )
+    elif __Using == "EIGH":
+        __eva, __eve = numpy.linalg.eigh( __Ensemble @ __Ensemble.T )
+        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
+        __sv   = numpy.sqrt( __svsq )
+    elif __Using == "EIGVALS":
+        __eva  = numpy.linalg.eigvals( __Ensemble @ __Ensemble.T )
+        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
+        __sv   = numpy.sqrt( __svsq )
+    elif __Using == "EIGVALSH":
+        __eva = numpy.linalg.eigvalsh( __Ensemble @ __Ensemble.T )
+        __svsq = numpy.sort(numpy.abs(numpy.real( __eva )))[::-1]
+        __sv   = numpy.sqrt( __svsq )
+    else:
+        raise ValueError("Error in requested variant name: %s"%__Using)
+    #
+    __tisv = __svsq / __svsq.sum()
+    __qisv = 1. - __svsq.cumsum() / __svsq.sum()
+    # Différence à 1.e-16 : __qisv = 1. - __tisv.cumsum()
+    #
+    return __sv, __svsq, __tisv, __qisv
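Whatever the requested variant, the function returns the singular values, their squares, the per-mode share of information, and the residual information left after each mode. A small sanity check (assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import SingularValuesEstimation

    M = numpy.array([[3., 0.],
                     [0., 4.]])
    sv, svsq, tisv, qisv = SingularValuesEstimation(M)
    print(sv)    # [4. 3.]           singular values, decreasing
    print(tisv)  # [0.64 0.36]       share of the total squared singular values
    print(qisv)  # about [0.36 0.]   information remaining after each mode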
+
+# ==============================================================================
+def MaxL2NormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
+    "Maximum des normes L2 calculées par colonne"
+    if __LcCsts and len(__IncludedPoints) > 0:
+        normes = numpy.linalg.norm(
+            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
+            axis = 0,
+        )
+    else:
+        normes = numpy.linalg.norm( __Ensemble, axis = 0)
+    nmax = numpy.max(normes)
+    imax = numpy.argmax(normes)
+    return nmax, imax, normes
+
+def MaxLinfNormByColumn(__Ensemble, __LcCsts = False, __IncludedPoints = []):
+    "Maximum des normes Linf calculées par colonne"
+    if __LcCsts and len(__IncludedPoints) > 0:
+        normes = numpy.linalg.norm(
+            numpy.take(__Ensemble, __IncludedPoints, axis=0, mode='clip'),
+            axis = 0, ord=numpy.inf,
+        )
+    else:
+        normes = numpy.linalg.norm( __Ensemble, axis = 0, ord=numpy.inf)
+    nmax = numpy.max(normes)
+    imax = numpy.argmax(normes)
+    return nmax, imax, normes
+
+def InterpolationErrorByColumn(
+        __Ensemble = None, __Basis = None, __Points = None, __M = 2,  # Usage 1
+        __Differences = None,                                         # Usage 2
+        __ErrorNorm = None,                                           # Commun
+        __LcCsts = False, __IncludedPoints = [],                      # Commun
+        __CDM = False,  # ComputeMaxDifference                        # Commun
+        __RMU = False,  # ReduceMemoryUse                             # Commun
+        __FTL = False,  # ForceTril                                   # Commun
+        ):   # noqa: E123
+    "Analyse des normes d'erreurs d'interpolation calculées par colonne"
+    if __ErrorNorm == "L2":
+        NormByColumn = MaxL2NormByColumn
+    else:
+        NormByColumn = MaxLinfNormByColumn
+    #
+    if __Differences is None and not __RMU:  # Usage 1
+        if __FTL:
+            rBasis = numpy.tril( __Basis[__Points, :] )
+        else:
+            rBasis = __Basis[__Points, :]
+        rEnsemble = __Ensemble[__Points, :]
+        #
+        if __M > 1:
+            rBasis_inv = numpy.linalg.inv(rBasis)
+            Interpolator = numpy.dot(__Basis, numpy.dot(rBasis_inv, rEnsemble))
+        else:
+            rBasis_inv = 1. / rBasis
+            Interpolator = numpy.outer(__Basis, numpy.outer(rBasis_inv, rEnsemble))
+        #
+        differences = __Ensemble - Interpolator
+        #
+        error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
+        #
+        if __CDM:
+            maxDifference = differences[:, nbr]
+        #
+    elif __Differences is None and __RMU:  # Usage 1
+        if __FTL:
+            rBasis = numpy.tril( __Basis[__Points, :] )
+        else:
+            rBasis = __Basis[__Points, :]
+        rEnsemble = __Ensemble[__Points, :]
+        #
+        if __M > 1:
+            rBasis_inv = numpy.linalg.inv(rBasis)
+            rCoordinates = numpy.dot(rBasis_inv, rEnsemble)
+        else:
+            rBasis_inv = 1. / rBasis
+            rCoordinates = numpy.outer(rBasis_inv, rEnsemble)
+        #
+        error = 0.
+        nbr = -1
+        for iCol in range(__Ensemble.shape[1]):
+            if __M > 1:
+                iDifference = __Ensemble[:, iCol] - numpy.dot(__Basis, rCoordinates[:, iCol])
+            else:
+                iDifference = __Ensemble[:, iCol] - numpy.ravel(numpy.outer(__Basis, rCoordinates[:, iCol]))
+            #
+            normDifference, _, _ = NormByColumn(iDifference, __LcCsts, __IncludedPoints)
+            #
+            if normDifference > error:
+                error         = normDifference
+                nbr           = iCol
+        #
+        if __CDM:
+            maxDifference = __Ensemble[:, nbr] - numpy.dot(__Basis, rCoordinates[:, nbr])
+        #
+    else:  # Usage 2
+        differences = __Differences
+        #
+        error, nbr, _ = NormByColumn(differences, __LcCsts, __IncludedPoints)
+        #
+        if __CDM:
+            # faire cette variable intermédiaire coûte cher
+            maxDifference = differences[:, nbr]
+    #
+    if __CDM:
+        return error, nbr, maxDifference
+    else:
+        return error, nbr
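In its first usage mode, the function interpolates every snapshot from its values at the given points through the reduced basis, and reports the worst per-column error. A tiny worked example with a toy basis, toy snapshots and arbitrary interpolation points (assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import InterpolationErrorByColumn

    Basis    = numpy.array([[1., 0.], [0., 1.], [1., 1.], [2., -1.]])  # 2 modes on 4 points
    Ensemble = numpy.array([[1., 2., 0.],
                            [1., 0., 3.],
                            [2., 2., 3.],
                            [1., 4., 0.]])                             # 3 snapshots in columns
    Points   = [0, 1]                                                  # interpolation points
    error, worst = InterpolationErrorByColumn(
        __Ensemble=Ensemble, __Basis=Basis, __Points=Points, __M=2, __ErrorNorm="L2")
    print(error, worst)  # 3.0 2 : the third snapshot is the worst interpolated one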
+
+# ==============================================================================
+def EnsemblePerturbationWithGivenCovariance(
+        __Ensemble,
+        __Covariance,
+        __Seed = None ):
     "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
-    if hasattr(__Covariance,"assparsematrix"):
-        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
+    if hasattr(__Covariance, "assparsematrix"):
+        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix()) / abs(__Ensemble).mean() < mpr).all():
             # Traitement d'une covariance nulle ou presque
             return __Ensemble
         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
             # Traitement d'une covariance nulle ou presque
             return __Ensemble
     else:
-        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
+        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance) / abs(__Ensemble).mean() < mpr).all():
             # Traitement d'une covariance nulle ou presque
             return __Ensemble
         if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
@@ -591,21 +804,22 @@ def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=No
             return __Ensemble
     #
     __n, __m = __Ensemble.shape
-    if __Seed is not None: numpy.random.seed(__Seed)
+    if __Seed is not None:
+        numpy.random.seed(__Seed)
     #
-    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
+    if hasattr(__Covariance, "isscalar") and __Covariance.isscalar():
         # Traitement d'une covariance multiple de l'identité
         __zero = 0.
         __std  = numpy.sqrt(__Covariance.assparsematrix())
-        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
+        __Ensemble += numpy.random.normal(__zero, __std, size=(__m, __n)).T
     #
-    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
+    elif hasattr(__Covariance, "isvector") and __Covariance.isvector():
         # Traitement d'une covariance diagonale avec variances non identiques
         __zero = numpy.zeros(__n)
         __std  = numpy.sqrt(__Covariance.assparsematrix())
         __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
     #
-    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
+    elif hasattr(__Covariance, "ismatrix") and __Covariance.ismatrix():
         # Traitement d'une covariance pleine
         __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
     #
@@ -620,147 +834,172 @@ def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=No
 
 # ==============================================================================
 def CovarianceInflation(
-        InputCovOrEns,
-        InflationType   = None,
-        InflationFactor = None,
-        BackgroundCov   = None,
-        ):
+        __InputCovOrEns,
+        __InflationType   = None,
+        __InflationFactor = None,
+        __BackgroundCov   = None ):
     """
     Inflation applicable soit sur Pb ou Pa, soit sur les ensembles EXb ou EXa
 
     Synthèse : Hunt 2007, section 2.3.5
     """
-    if InflationFactor is None:
-        return InputCovOrEns
+    if __InflationFactor is None:
+        return __InputCovOrEns
     else:
-        InflationFactor = float(InflationFactor)
+        __InflationFactor = float(__InflationFactor)
     #
-    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
-        if InflationFactor < 1.:
-            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
-        if InflationFactor < 1.+mpr:
-            return InputCovOrEns
-        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
+    __InputCovOrEns = numpy.asarray(__InputCovOrEns)
+    if __InputCovOrEns.size == 0:
+        return __InputCovOrEns
     #
-    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
-        if InflationFactor < 1.:
+    if __InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
+        if __InflationFactor < 1.:
             raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
-        if InflationFactor < 1.+mpr:
-            return InputCovOrEns
-        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
-        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
-            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
+        if __InflationFactor < 1. + mpr:  # No inflation = 1
+            return __InputCovOrEns
+        __OutputCovOrEns = __InflationFactor**2 * __InputCovOrEns
     #
-    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
-        if InflationFactor < 0.:
+    elif __InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
+        if __InflationFactor < 1.:
+            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
+        if __InflationFactor < 1. + mpr:  # No inflation = 1
+            return __InputCovOrEns
+        __InputCovOrEnsMean = __InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
+        __OutputCovOrEns = __InputCovOrEnsMean[:, numpy.newaxis] \
+            + __InflationFactor * (__InputCovOrEns - __InputCovOrEnsMean[:, numpy.newaxis])
+    #
+    elif __InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
+        if __InflationFactor < 0.:
             raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
-        if InflationFactor < mpr:
-            return InputCovOrEns
-        __n, __m = numpy.asarray(InputCovOrEns).shape
+        if __InflationFactor < mpr:  # No inflation = 0
+            return __InputCovOrEns
+        __n, __m = __InputCovOrEns.shape
         if __n != __m:
             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
-        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
+        __tr = __InputCovOrEns.trace() / __n
+        if __InflationFactor > __tr:
+            raise ValueError("Inflation factor for additive inflation has to be small over %.0e."%__tr)
+        __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * numpy.identity(__n)
     #
-    elif InflationType == "HybridOnBackgroundCovariance":
-        if InflationFactor < 0.:
+    elif __InflationType == "HybridOnBackgroundCovariance":
+        if __InflationFactor < 0.:
             raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
-        if InflationFactor < mpr:
-            return InputCovOrEns
-        __n, __m = numpy.asarray(InputCovOrEns).shape
+        if __InflationFactor < mpr:  # No inflation = 0
+            return __InputCovOrEns
+        __n, __m = __InputCovOrEns.shape
         if __n != __m:
             raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
-        if BackgroundCov is None:
+        if __BackgroundCov is None:
             raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
-        if InputCovOrEns.shape != BackgroundCov.shape:
+        if __InputCovOrEns.shape != __BackgroundCov.shape:
             raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
-        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
+        __OutputCovOrEns = (1. - __InflationFactor) * __InputCovOrEns + __InflationFactor * __BackgroundCov
     #
-    elif InflationType == "Relaxation":
-        raise NotImplementedError("InflationType Relaxation")
+    elif __InflationType == "Relaxation":
+        raise NotImplementedError("Relaxation inflation type not implemented")
     #
     else:
-        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
+        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%__InflationType)
     #
-    return OutputCovOrEns
+    return __OutputCovOrEns
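For the anomaly variants, the inflation stretches the anomalies around the unchanged ensemble mean, i.e. Xi <- mean + factor * (Xi - mean). A short check on a toy ensemble (assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import CovarianceInflation, EnsembleMean

    E    = numpy.array([[1., 2., 3.],
                        [4., 6., 8.]])  # members in columns
    Einf = CovarianceInflation(E, "MultiplicativeOnAnalysisAnomalies", 1.5)
    print(EnsembleMean(Einf).ravel())   # [2. 6.] : the ensemble mean is preserved
    print(Einf)                         # [[0.5 2. 3.5] [3. 6. 9.]] : anomalies scaled by 1.5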
 
 # ==============================================================================
-def HessienneEstimation(nb, HaM, HtM, BI, RI):
+def HessienneEstimation( __selfA, __nb, __HaM, __HtM, __BI, __RI ):
     "Estimation de la Hessienne"
     #
-    HessienneI = []
-    for i in range(int(nb)):
-        _ee    = numpy.zeros((nb,1))
-        _ee[i] = 1.
-        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
-        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
-    #
-    A = numpy.linalg.inv(numpy.array( HessienneI ))
-    #
-    if min(A.shape) != max(A.shape):
-        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-    if (numpy.diag(A) < 0).any():
-        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-    if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+    __HessienneI = []
+    for i in range(int(__nb)):
+        __ee    = numpy.zeros((__nb, 1))
+        __ee[i] = 1.
+        __HtEE  = numpy.dot(__HtM, __ee).reshape((-1, 1))
+        __HessienneI.append( numpy.ravel( __BI * __ee + __HaM * (__RI * __HtEE) ) )
+    #
+    __A = numpy.linalg.inv(numpy.array( __HessienneI ))
+    __A = (__A + __A.T) * 0.5  # Symétrie
+    __A = __A + mpr * numpy.trace( __A ) * numpy.identity(__nb)  # Positivité
+    #
+    if min(__A.shape) != max(__A.shape):
+        raise ValueError(
+            "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
+            " is of shape %s, despites it has to be a"%(str(__A.shape),) + \
+            " squared matrix. There is an error in the observation operator," + \
+            " please check it.")
+    if (numpy.diag(__A) < 0).any():
+        raise ValueError(
+            "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
+            " has at least one negative value on its diagonal. There is an" + \
+            " error in the observation operator, please check it.")
+    if logging.getLogger().level < logging.WARNING:  # La vérification n'a lieu qu'en debug
         try:
-            L = numpy.linalg.cholesky( A )
-        except:
-            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
-    #
-    return A
+            numpy.linalg.cholesky( __A )
+            logging.debug("%s La matrice de covariance a posteriori A est bien symétrique définie positive."%(__selfA._name,))
+        except Exception:
+            raise ValueError(
+                "The %s a posteriori covariance matrix A"%(__selfA._name,) + \
+                " is not symmetric positive-definite. Please check your a" + \
+                " priori covariances and your observation operator.")
+    #
+    return __A
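The routine applies the 3DVAR Hessian B^-1 + H^T R^-1 H to each canonical basis vector, inverts the result, then symmetrises it and nudges it positive. The arguments are expected to multiply like matrices (as the Covariance objects do), hence asmatrix in this toy 2x2 check; the selfA stand-in is hypothetical and only carries the _name attribute read for messages (assumes the daCore package of this repository is importable):

    import numpy, types
    from daCore.NumericObjects import HessienneEstimation

    H     = numpy.asmatrix([[1., 0.], [0., 2.]])     # tangent observation operator HtM
    BI    = numpy.asmatrix(0.5 * numpy.identity(2))  # B^-1
    RI    = numpy.asmatrix(numpy.identity(2))        # R^-1
    selfA = types.SimpleNamespace(_name="demo")      # only ._name is read, for messages
    A = HessienneEstimation(selfA, 2, H.T, H, BI, RI)
    print(numpy.allclose(A, numpy.linalg.inv(BI + H.T * RI * H)))  # True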
 
 # ==============================================================================
-def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
-    "Estimation des quantiles a posteriori (selfA est modifié)"
+def QuantilesEstimations( selfA, A, Xa, HXa = None, Hm = None, HtM = None ):
+    "Estimation des quantiles a posteriori à partir de A>0 (selfA est modifié)"
     nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
     #
     # Traitement des bornes
     if "StateBoundsForQuantiles" in selfA._parameters:
-        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Prioritaire
+        LBounds = selfA._parameters["StateBoundsForQuantiles"]  # Prioritaire
     elif "Bounds" in selfA._parameters:
         LBounds = selfA._parameters["Bounds"]  # Défaut raisonnable
     else:
         LBounds = None
     if LBounds is not None:
         LBounds = ForceNumericBounds( LBounds )
-    _Xa = numpy.ravel(Xa)
+    __Xa = numpy.ravel(Xa)
     #
     # Échantillonnage des états
     YfQ  = None
     EXr  = None
     for i in range(nbsamples):
         if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
-            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
-            if LBounds is not None: # "EstimateProjection" par défaut
-                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1))) - Xa),axis=1)
-                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1))) - Xa),axis=1)
+            dXr = (numpy.random.multivariate_normal(__Xa, A) - __Xa).reshape((-1, 1))
+            if LBounds is not None:  # "EstimateProjection" par défaut
+                dXr = numpy.max(numpy.hstack((dXr, LBounds[:, 0].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
+                dXr = numpy.min(numpy.hstack((dXr, LBounds[:, 1].reshape((-1, 1))) - __Xa.reshape((-1, 1))), axis=1)
             dYr = HtM @ dXr
-            Yr = HXa.reshape((-1,1)) + dYr
-            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
+            Yr = HXa.reshape((-1, 1)) + dYr
+            if selfA._toStore("SampledStateForQuantiles"):
+                Xr = __Xa + numpy.ravel(dXr)
         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
-            Xr = numpy.random.multivariate_normal(_Xa,A)
-            if LBounds is not None: # "EstimateProjection" par défaut
-                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
-                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
+            Xr = numpy.random.multivariate_normal(__Xa, A)
+            if LBounds is not None:  # "EstimateProjection" par défaut
+                Xr = numpy.max(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 0].reshape((-1, 1)))), axis=1)
+                Xr = numpy.min(numpy.hstack((Xr.reshape((-1, 1)), LBounds[:, 1].reshape((-1, 1)))), axis=1)
             Yr = numpy.asarray(Hm( Xr ))
         else:
             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
         #
         if YfQ is None:
-            YfQ = Yr.reshape((-1,1))
-            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
+            YfQ = Yr.reshape((-1, 1))
+            if selfA._toStore("SampledStateForQuantiles"):
+                EXr = Xr.reshape((-1, 1))
         else:
-            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
-            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
+            YfQ = numpy.hstack((YfQ, Yr.reshape((-1, 1))))
+            if selfA._toStore("SampledStateForQuantiles"):
+                EXr = numpy.hstack((EXr, Xr.reshape((-1, 1))))
     #
     # Extraction des quantiles
     YfQ.sort(axis=-1)
     YQ = None
     for quantile in selfA._parameters["Quantiles"]:
-        if not (0. <= float(quantile) <= 1.): continue
-        indice = int(nbsamples * float(quantile) - 1./nbsamples)
-        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
-        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
-    if YQ is not None: # Liste non vide de quantiles
+        if not (0. <= float(quantile) <= 1.):
+            continue
+        indice = int(nbsamples * float(quantile) - 1. / nbsamples)
+        if YQ is None:
+            YQ = YfQ[:, indice].reshape((-1, 1))
+        else:
+            YQ = numpy.hstack((YQ, YfQ[:, indice].reshape((-1, 1))))
+    if YQ is not None:  # Liste non vide de quantiles
         selfA.StoredVariables["SimulationQuantiles"].store( YQ )
     if selfA._toStore("SampledStateForQuantiles"):
         selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
@@ -768,65 +1007,93 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     return 0
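The quantile extraction itself only needs the sorted simulated observations and the index formula used above. A minimal standalone sketch with a hypothetical sample set (no state sampling or bounds handling):

    import numpy

    numpy.random.seed(0)
    nbsamples = 1000
    YfQ = numpy.random.normal(size=(3, nbsamples))  # 3 observed quantities, nbsamples draws
    YfQ.sort(axis=-1)                               # sort the simulated values of each quantity
    for quantile in (0.05, 0.5, 0.95):
        indice = int(nbsamples * quantile - 1. / nbsamples)
        print(quantile, YfQ[:, indice])             # empirical quantile of each observed quantity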
 
 # ==============================================================================
-def ForceNumericBounds( __Bounds ):
+def ForceNumericBounds( __Bounds, __infNumbers = True ):
     "Force les bornes à être des valeurs numériques, sauf si globalement None"
     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
-    if __Bounds is None: return None
-    # Converti toutes les bornes individuelles None à +/- l'infini
-    __Bounds = numpy.asarray( __Bounds, dtype=float )
-    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
-        raise ValueError("Incorrectly shaped bounds data")
-    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
-    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
+    if __Bounds is None:
+        return None
+    #
+    # Converti toutes les bornes individuelles None à +/- l'infini chiffré
+    __Bounds = numpy.asarray( __Bounds, dtype=float ).reshape((-1, 2))
+    if len(__Bounds.shape) != 2 or __Bounds.shape[0] == 0 or __Bounds.shape[1] != 2:
+        raise ValueError("Incorrectly shaped bounds data (effective shape is %s)"%(__Bounds.shape,))
+    if __infNumbers:
+        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -float('inf')
+        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = float('inf')
+    else:
+        __Bounds[numpy.isnan(__Bounds[:, 0]), 0] = -sys.float_info.max
+        __Bounds[numpy.isnan(__Bounds[:, 1]), 1] = sys.float_info.max
     return __Bounds
 
 # ==============================================================================
-def RecentredBounds( __Bounds, __Center):
+def RecentredBounds( __Bounds, __Center, __Scale = None ):
     "Recentre les bornes autour de 0, sauf si globalement None"
     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
-    if __Bounds is None: return None
-    # Recentre les valeurs numériques de bornes
-    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
+    if __Bounds is None:
+        return None
+    #
+    if __Scale is None:
+        # Recentre les valeurs numériques de bornes
+        return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1, 1))
+    else:
+        # Recentre les valeurs numériques de bornes et change l'échelle par une matrice
+        return __Scale @ (ForceNumericBounds( __Bounds, False ) - numpy.ravel( __Center ).reshape((-1, 1)))
 
 # ==============================================================================
-def ApplyBounds( __Vector, __Bounds, __newClip = True):
+def ApplyBounds( __Vector, __Bounds, __newClip = True ):
     "Applique des bornes numériques à un point"
     # Conserve une valeur par défaut s'il n'y a pas de bornes
-    if __Bounds is None: return __Vector
+    if __Bounds is None:
+        return __Vector
     #
-    if not isinstance(__Vector, numpy.ndarray): # Is an array
+    if not isinstance(__Vector, numpy.ndarray):  # Is an array
         raise ValueError("Incorrect array definition of vector data")
-    if not isinstance(__Bounds, numpy.ndarray): # Is an array
+    if not isinstance(__Bounds, numpy.ndarray):  # Is an array
         raise ValueError("Incorrect array definition of bounds data")
-    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector lenght
-        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
+    if 2 * __Vector.size != __Bounds.size:  # Is a 2 column array of vector length
+        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size, __Vector.size))
     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
         raise ValueError("Incorrectly shaped bounds data")
     #
     if __newClip:
         __Vector = __Vector.clip(
-            __Bounds[:,0].reshape(__Vector.shape),
-            __Bounds[:,1].reshape(__Vector.shape),
-            )
+            __Bounds[:, 0].reshape(__Vector.shape),
+            __Bounds[:, 1].reshape(__Vector.shape),
+        )
     else:
-        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
-        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
+        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 0])), axis=1)
+        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1, 1)), numpy.asmatrix(__Bounds)[:, 1])), axis=1)
         __Vector = numpy.asarray(__Vector)
     #
     return __Vector
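ForceNumericBounds turns None entries into numeric infinities, ApplyBounds then clips a state component-wise, and RecentredBounds expresses the same box around a given point. A short usage sketch with hypothetical bounds (assumes the daCore package of this repository is importable):

    import numpy
    from daCore.NumericObjects import ForceNumericBounds, RecentredBounds, ApplyBounds

    Bounds = ForceNumericBounds([[0., 10.], [None, 5.], [-1., None]])  # None -> +/- inf
    X      = numpy.array([12., -3., 0.5])
    print(ApplyBounds(X, Bounds))                 # [10.  -3.   0.5] after clipping to the box
    print(RecentredBounds(Bounds, [1., 1., 1.]))  # the same box, recentred around the point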
 
 # ==============================================================================
-def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+def VariablesAndIncrementsBounds( __Bounds, __BoxBounds, __Xini, __Name, __Multiplier = 1. ):
+    __Bounds    = ForceNumericBounds( __Bounds )
+    __BoxBounds = ForceNumericBounds( __BoxBounds )
+    if __Bounds is None and __BoxBounds is None:
+        raise ValueError("Algorithm %s requires bounds on all variables (by Bounds), or on all variable increments (by BoxBounds), or both, to be explicitly given."%(__Name,))
+    elif __Bounds is None and __BoxBounds is not None:
+        __Bounds    = __BoxBounds
+        logging.debug("%s Definition of parameter bounds from current parameter increment bounds"%(__Name,))
+    elif __Bounds is not None and __BoxBounds is None:
+        __BoxBounds = __Multiplier * (__Bounds - __Xini.reshape((-1, 1)))  # "M * [Xmin,Xmax]-Xini"
+        logging.debug("%s Definition of parameter increment bounds from current parameter bounds"%(__Name,))
+    return __Bounds, __BoxBounds
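
For illustration only (hypothetical values), the case where Bounds are given and the increment bounds are deduced as `__Multiplier * (Bounds - Xini)`:

import numpy
Bounds = numpy.array([[0., 4.], [1., 3.]])
Xini   = numpy.array([2., 2.])
print(0.5 * (Bounds - Xini.reshape((-1, 1))))  # [[-1.   1. ] [-0.5  0.5]] with a 0.5 multiplier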
+
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble( __EnXn, __EnXf, __Ynpu, __HO, __R, __B, __SuppPars ):
     "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+    __Betaf = __SuppPars["HybridCovarianceEquilibrium"]
     #
     Xf = EnsembleMean( __EnXf )
     Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
-    Pf = (1 - __Betaf) * __B + __Betaf * Pf
+    Pf = (1 - __Betaf) * __B.asfullmatrix(Xf.size) + __Betaf * Pf
     #
     selfB = PartialAlgorithm("3DVAR")
     selfB._parameters["Minimizer"] = "LBFGSB"
-    selfB._parameters["MaximumNumberOfSteps"] = 15000
-    selfB._parameters["CostDecrementTolerance"] = 1.e-7
+    selfB._parameters["MaximumNumberOfIterations"] = __SuppPars["HybridMaximumNumberOfIterations"]
+    selfB._parameters["CostDecrementTolerance"] = __SuppPars["HybridCostDecrementTolerance"]
     selfB._parameters["ProjectedGradientTolerance"] = -1
     selfB._parameters["GradientNormTolerance"] = 1.e-05
     selfB._parameters["StoreInternalVariables"] = False
@@ -834,3604 +1101,445 @@ def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Bet
     selfB._parameters["optdisp"] = 0
     selfB._parameters["Bounds"] = None
     selfB._parameters["InitializationPoint"] = Xf
-    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
-    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+    from daAlgorithms.Atoms import std3dvar
+    std3dvar.std3dvar(selfB, Xf, __Ynpu, None, __HO, None, __R, Pf)
+    Xa = selfB.get("Analysis")[-1].reshape((-1, 1))
     del selfB
     #
     return Xa + EnsembleOfAnomalies( __EnXn )
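
The hybrid forecast covariance built above is a convex combination of the static B and the ensemble covariance, weighted by HybridCovarianceEquilibrium. A sketch with hypothetical matrices:

import numpy
B_full, Pf_ens, beta = numpy.eye(2), numpy.array([[2., 0.], [0., 2.]]), 0.25
print((1 - beta) * B_full + beta * Pf_ens)     # [[1.25 0.  ] [0.   1.25]]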
 
 # ==============================================================================
-def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Constrained Unscented Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
-    #
-    L     = Xb.size
-    Alpha = selfA._parameters["Alpha"]
-    Beta  = selfA._parameters["Beta"]
-    if selfA._parameters["Kappa"] == 0:
-        if selfA._parameters["EstimationOf"] == "State":
-            Kappa = 0
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            Kappa = 3 - L
-    else:
-        Kappa = selfA._parameters["Kappa"]
-    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
-    Gamma  = math.sqrt( L + Lambda )
-    #
-    Ww = []
-    Ww.append( 0. )
-    for i in range(2*L):
-        Ww.append( 1. / (2.*(L + Lambda)) )
-    #
-    Wm = numpy.array( Ww )
-    Wm[0] = Lambda / (L + Lambda)
-    Wc = numpy.array( Ww )
-    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
-    #
-    # Opérateurs
-    Hm = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Durée d'observation et tailles
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        if hasattr(B,"asfullmatrix"):
-            Pn = B.asfullmatrix(__n)
-        else:
-            Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
-        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
-        nbSpts = 2*Xn.size+1
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
-        #
-        XEtnnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm @ Un
-                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # --- > Par principe, M = Id, Q = 0
-                XEtnnpi = Xnp[:,point]
-            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
-        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
-        #
-        Xncm = ( XEtnnp * Wm ).sum(axis=1)
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
-        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
-        for point in range(nbSpts):
-            Pnm += Wc[i] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
-        #
-        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
-            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
-        else:
-            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
-        #
-        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
-        #
-        Ynnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                Ynnpi = Hm( (Xnnp[:,point], None) )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                Ynnpi = Hm( (Xnnp[:,point], Un) )
-            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
-        Ynnp = numpy.concatenate( Ynnp, axis=1 )
-        #
-        Yncm = ( Ynnp * Wm ).sum(axis=1)
-        #
-        Pyyn = R
-        Pxyn = 0.
-        for point in range(nbSpts):
-            Pyyn += Wc[i] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-            Pxyn += Wc[i] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-        #
-        _Innovation  = Ynpu - Yncm.reshape((-1,1))
-        if selfA._parameters["EstimationOf"] == "Parameters":
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pxyn * Pyyn.I
-        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
-        Pn = Pnm - Kn * Pyyn * Kn.T
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        Xa = Xn # Pointeurs
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> avec current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xncm )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xncm - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
-        # ---> autres
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Stockage final supplémentaire de l'optimum en estimation de paramètres
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
+def GenerateRandomPointInHyperSphere( __Center, __Radius ):
+    "Génère un point aléatoire uniformément à l'intérieur d'une hyper-sphère"
+    __Dimension  = numpy.asarray( __Center ).size
+    __GaussDelta = numpy.random.normal( 0, 1, size=__Center.shape )
+    __VectorNorm = numpy.linalg.norm( __GaussDelta )
+    __PointOnHS  = __Radius * (__GaussDelta / __VectorNorm)
+        __MoveInHS   = math.exp( math.log(numpy.random.uniform()) / __Dimension)  # rand()**(1/n)
+    __PointInHS  = __MoveInHS * __PointOnHS
+    return __Center + __PointInHS
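
A standalone sketch of the same recipe (a Gaussian direction, then a uniform**(1/n) radius), usable to check empirically that the points are uniform in the ball; the values below are hypothetical and not part of the patch:

import numpy
n, radius = 3, 2.0
pts = []
for _ in range(10000):
    g = numpy.random.normal(0., 1., size=n)
    r = radius * numpy.random.uniform()**(1. / n)
    pts.append(r * g / numpy.linalg.norm(g))
pts = numpy.array(pts)
# Uniformity check: the fraction inside half the radius should be close to (1/2)**n = 0.125
print(numpy.mean(numpy.linalg.norm(pts, axis=1) <= radius / 2.))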
 
 # ==============================================================================
-def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Constrained Extended Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Durée d'observation et tailles
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+class GenerateWeightsAndSigmaPoints(object):
+    "Génère les points sigma et les poids associés"
+
+    def __init__(self,
+                 Nn=0, EO="State", VariantM="UKF",
+                 Alpha=None, Beta=2., Kappa=0.):
+        self.Nn = int(Nn)
+        self.Alpha = numpy.longdouble( Alpha )
+        self.Beta  = numpy.longdouble( Beta )
+        if abs(Kappa) < 2 * mpr:
+            if EO == "Parameters" and VariantM == "UKF":
+                self.Kappa = 3 - self.Nn
+            else:  # EO == "State":
+                self.Kappa = 0
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
-        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
-            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
+            self.Kappa = Kappa
+        self.Kappa  = numpy.longdouble( self.Kappa )
+        self.Lambda = self.Alpha**2 * ( self.Nn + self.Kappa ) - self.Nn
+        self.Gamma  = self.Alpha * numpy.sqrt( self.Nn + self.Kappa )
+        # Rq.: Gamma = sqrt(n+Lambda) = Alpha*sqrt(n+Kappa)
+        assert 0. < self.Alpha <= 1., "Alpha has to be strictly greater than 0 and less than or equal to 1"
+        #
+        if VariantM == "UKF":
+            self.Wm, self.Wc, self.SC = self.__UKF2000()
+        elif VariantM == "S3F":
+            self.Wm, self.Wc, self.SC = self.__S3F2022()
+        elif VariantM == "MSS":
+            self.Wm, self.Wc, self.SC = self.__MSS2011()
+        elif VariantM == "5OS":
+            self.Wm, self.Wc, self.SC = self.__5OS2002()
         else:
-            Un = None
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > Par principe, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
-        #
-        Xa = Xn # Pointeurs
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> avec current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> autres
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Stockage final supplémentaire de l'optimum en estimation de paramètres
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
+            raise ValueError("Variant \"%s\" is not a valid one."%VariantM)
+
+    def __UKF2000(self):
+        "Standard Set, Julier et al. 2000 (aka Canonical UKF)"
+        # Rq.: W^{(m)}_{i=/=0} = 1. / (2.*(n + Lambda))
+        Winn = 1. / (2. * ( self.Nn + self.Kappa ) * self.Alpha**2)
+        Ww = []
+        Ww.append( 0. )
+        for point in range(2 * self.Nn):
+            Ww.append( Winn )
+        # Rq.: LsLpL = Lambda / (n + Lambda)
+        LsLpL = 1. - self.Nn / (self.Alpha**2 * ( self.Nn + self.Kappa ))
+        Wm = numpy.array( Ww )
+        Wm[0] = LsLpL
+        Wc = numpy.array( Ww )
+        Wc[0] = LsLpL + (1. - self.Alpha**2 + self.Beta)
+        # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "UKF ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
+        #
+        SC = numpy.zeros((self.Nn, len(Ww)))
+        for ligne in range(self.Nn):
+            it = ligne + 1
+            SC[ligne, it          ] = self.Gamma
+            SC[ligne, self.Nn + it] = -self.Gamma
+        #
+        return Wm, Wc, SC
+
+    def __S3F2022(self):
+        "Scaled Spherical Simplex Set, Papakonstantinou et al. 2022"
+        # Rq.: W^{(m)}_{i=/=0} = (n + Kappa) / ((n + Lambda) * (n + 1 + Kappa))
+        Winn = 1. / ((self.Nn + 1. + self.Kappa) * self.Alpha**2)
+        Ww = []
+        Ww.append( 0. )
+        for point in range(self.Nn + 1):
+            Ww.append( Winn )
+        # Rq.: LsLpL = Lambda / (n + Lambda)
+        LsLpL = 1. - self.Nn / (self.Alpha**2 * ( self.Nn + self.Kappa ))
+        Wm = numpy.array( Ww )
+        Wm[0] = LsLpL
+        Wc = numpy.array( Ww )
+        Wc[0] = LsLpL + (1. - self.Alpha**2 + self.Beta)
+        # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "S3F ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
+        #
+        SC = numpy.zeros((self.Nn, len(Ww)))
+        for ligne in range(self.Nn):
+            it = ligne + 1
+            q_t = it / math.sqrt( it * (it + 1) * Winn )
+            SC[ligne, 1:it + 1] = -q_t / it
+            SC[ligne, it + 1  ] = q_t
+        #
+        return Wm, Wc, SC
+
+    def __MSS2011(self):
+        "Minimum Set, Menegaz et al. 2011"
+        rho2 = (1 - self.Alpha) / self.Nn
+        Cc = numpy.real(scipy.linalg.sqrtm( numpy.identity(self.Nn) - rho2 ))
+        Ww = self.Alpha * rho2 * scipy.linalg.inv(Cc) @ numpy.ones(self.Nn) @ scipy.linalg.inv(Cc.T)
+        Wm = Wc = numpy.concatenate((Ww, [self.Alpha]))
+        # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "MSS ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
+        #
+        # inv(sqrt(W)) = diag(inv(sqrt(W)))
+        SC1an = Cc @ numpy.diag(1. / numpy.sqrt( Ww ))
+        SCnpu = (- numpy.sqrt(rho2) / numpy.sqrt(self.Alpha)) * numpy.ones(self.Nn).reshape((-1, 1))
+        SC = numpy.concatenate((SC1an, SCnpu), axis=1)
+        #
+        return Wm, Wc, SC
+
+    def __5OS2002(self):
+        "Fifth Order Set, Lerner 2002"
+        Ww = []
+        for point in range(2 * self.Nn):
+            Ww.append( (4. - self.Nn) / 18. )
+        for point in range(2 * self.Nn, 2 * self.Nn**2):
+            Ww.append( 1. / 36. )
+        Ww.append( (self.Nn**2 - 7 * self.Nn) / 18. + 1.)
+        Wm = Wc = numpy.array( Ww )
+        # OK: assert abs(Wm.sum()-1.) < self.Nn*mpr, "5OS ill-conditioned %s >= %s"%(abs(Wm.sum()-1.), self.Nn*mpr)
+        #
+        xi1n  = numpy.diag( math.sqrt(3) * numpy.ones( self.Nn ) )
+        xi2n  = numpy.diag( -math.sqrt(3) * numpy.ones( self.Nn ) )
+        #
+        xi3n1 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
+        xi3n2 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
+        xi4n1 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
+        xi4n2 = numpy.zeros((int((self.Nn - 1) * self.Nn / 2), self.Nn), dtype=float)
+        ia = 0
+        for i1 in range(self.Nn - 1):
+            for i2 in range(i1 + 1, self.Nn):
+                xi3n1[ia, i1] = xi3n2[ia, i2] = math.sqrt(3)
+                xi3n2[ia, i1] = xi3n1[ia, i2] = -math.sqrt(3)
+                # --------------------------------
+                xi4n1[ia, i1] = xi4n1[ia, i2] = math.sqrt(3)
+                xi4n2[ia, i1] = xi4n2[ia, i2] = -math.sqrt(3)
+                ia += 1
+        SC = numpy.concatenate((xi1n, xi2n, xi3n1, xi3n2, xi4n1, xi4n2, numpy.zeros((1, self.Nn)))).T
+        #
+        return Wm, Wc, SC
+
+    def nbOfPoints(self):
+        assert self.Nn      == self.SC.shape[0], "Size mismatch %i =/= %i"%(self.Nn, self.SC.shape[0])
+        assert self.Wm.size == self.SC.shape[1], "Size mismatch %i =/= %i"%(self.Wm.size, self.SC.shape[1])
+        assert self.Wm.size == self.Wc.size, "Size mismatch %i =/= %i"%(self.Wm.size, self.Wc.size)
+        return self.Wm.size
+
+    def get(self):
+        return self.Wm, self.Wc, self.SC
+
+    def __repr__(self):
+        "x.__repr__() <==> repr(x)"
+        msg  = ""
+        msg += "    Alpha   = %s\n"%self.Alpha
+        msg += "    Beta    = %s\n"%self.Beta
+        msg += "    Kappa   = %s\n"%self.Kappa
+        msg += "    Lambda  = %s\n"%self.Lambda
+        msg += "    Gamma   = %s\n"%self.Gamma
+        msg += "    Wm      = %s\n"%self.Wm
+        msg += "    Wc      = %s\n"%self.Wc
+        msg += "    sum(Wm) = %s\n"%numpy.sum(self.Wm)
+        msg += "    SC      =\n%s\n"%self.SC
+        return msg
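
As a usage sketch only (arbitrary parameters, assuming the class is importable from daCore.NumericObjects), the canonical UKF set can be checked for its expected properties: 2n+1 points, mean weights summing to one, and a centred set of sigma directions:

import numpy
from daCore.NumericObjects import GenerateWeightsAndSigmaPoints
gw = GenerateWeightsAndSigmaPoints(Nn=3, EO="State", VariantM="UKF", Alpha=0.5, Beta=2., Kappa=0.)
Wm, Wc, SC = gw.get()
print(gw.nbOfPoints())                       # 7 = 2 * Nn + 1
print(abs(float(Wm.sum()) - 1.) < 1e-10)     # True : the mean weights sum to one
print(numpy.allclose(SC @ Wm, 0.))           # True : the sigma point set is centred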
 
 # ==============================================================================
-def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
-    """
-    EnKS
-    """
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
+def GetNeighborhoodTopology( __ntype, __ipop ):
+    "Renvoi une topologie de connexion pour une population de points"
+    if __ntype in ["FullyConnectedNeighborhood", "FullyConnectedNeighbourhood", "gbest"]:
+        __topology = [__ipop for __i in __ipop]
+    elif __ntype in ["RingNeighborhoodWithRadius1", "RingNeighbourhoodWithRadius1", "lbest"]:
+        __cpop = list(__ipop[-1:]) + list(__ipop) + list(__ipop[:1])
+        __topology = [__cpop[__n:__n + 3] for __n in range(len(__ipop))]
+    elif __ntype in ["RingNeighborhoodWithRadius2", "RingNeighbourhoodWithRadius2"]:
+        __cpop = list(__ipop[-2:]) + list(__ipop) + list(__ipop[:2])
+        __topology = [__cpop[__n:__n + 5] for __n in range(len(__ipop))]
+    elif __ntype in ["AdaptativeRandomWith3Neighbors", "AdaptativeRandomWith3Neighbours", "abest"]:
+        __cpop = 3 * list(__ipop)
+        __topology = [[__i] + list(numpy.random.choice(__cpop, 3)) for __i in __ipop]
+    elif __ntype in ["AdaptativeRandomWith5Neighbors", "AdaptativeRandomWith5Neighbours"]:
+        __cpop = 5 * list(__ipop)
+        __topology = [[__i] + list(numpy.random.choice(__cpop, 5)) for __i in __ipop]
     else:
-        Cm = None
-    #
-    # Précalcul des inversions de B et R
-    RIdemi = R.sqrtmI()
-    #
-    # Durée d'observation et tailles
-    LagL = selfA._parameters["SmootherLagL"]
-    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
-        raise ValueError("Fixed-lag smoother requires a series of observation")
-    if Y.stepnumber() < LagL:
-        raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
-    duration = Y.stepnumber()
-    __p = numpy.cumprod(Y.shape())[-1]
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+        raise ValueError("Swarm topology type unavailable because name \"%s\" is unknown."%__ntype)
+    return __topology
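
A small usage sketch (hypothetical population of four particles, assuming the function is importable from daCore.NumericObjects):

from daCore.NumericObjects import GetNeighborhoodTopology
print(GetNeighborhoodTopology("FullyConnectedNeighborhood", list(range(4))))
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
print(GetNeighborhoodTopology("RingNeighborhoodWithRadius1", list(range(4))))
# [[3, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 0]]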
+
+# ==============================================================================
+def FindIndexesFromNames( __NameOfLocations = None, __ExcludeLocations = None, ForceArray = False ):
+    "Exprime les indices des noms exclus, en ignorant les absents"
+    if __ExcludeLocations is None:
+        __ExcludeIndexes = ()
+    elif isinstance(__ExcludeLocations, (list, numpy.ndarray, tuple)) and len(__ExcludeLocations) == 0:
+        __ExcludeIndexes = ()
+    # ----------
+    elif __NameOfLocations is None:
+        try:
+            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
+        except ValueError as e:
+            if "invalid literal for int() with base 10:" in str(e):
+                raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
             else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-    #
-    # Calcul direct initial (on privilégie la mémorisation au recalcul)
-    __seed = numpy.random.get_state()
-    selfB = copy.deepcopy(selfA)
-    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
-    if VariantM == "EnKS16-KalmanFilterFormula":
-        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
-    else:
-        raise ValueError("VariantM has to be chosen in the authorized methods list.")
-    if LagL > 0:
-        EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
+                raise ValueError(str(e))
+    elif isinstance(__NameOfLocations, (list, numpy.ndarray, tuple)) and len(__NameOfLocations) == 0:
+        try:
+            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
+        except ValueError as e:
+            if "invalid literal for int() with base 10:" in str(e):
+                raise ValueError("to exclude named locations, initial location name list can not be void and has to have the same length as one state")
+            else:
+                raise ValueError(str(e))
+    # ----------
     else:
-        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
-    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
-    #
-    for step in range(LagL,duration-1):
-        #
-        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
-        sEL.append(None)
-        #
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
+        try:
+            __ExcludeIndexes = numpy.asarray(__ExcludeLocations, dtype=int)
+        except ValueError as e:
+            if "invalid literal for int() with base 10:" in str(e):
+                if len(__NameOfLocations) < 1.e6 + 1 and len(__ExcludeLocations) > 1500:
+                    __Heuristic = True
+                else:
+                    __Heuristic = False
+                if ForceArray or __Heuristic:
+                    # Recherche par array permettant des noms invalides, peu efficace
+                    __NameToIndex = dict(numpy.array((
+                        __NameOfLocations,
+                        range(len(__NameOfLocations))
+                    )).T)
+                    __ExcludeIndexes = numpy.asarray([__NameToIndex.get(k, -1) for k in __ExcludeLocations], dtype=int)
+                    #
+                else:
+                    # Recherche par liste permettant des noms invalides, très efficace
+                    def __NameToIndex_get( cle, default = -1 ):
+                        if cle in __NameOfLocations:
+                            return __NameOfLocations.index(cle)
+                        else:
+                            return default
+                    __ExcludeIndexes = numpy.asarray([__NameToIndex_get(k, -1) for k in __ExcludeLocations], dtype=int)
+                    #
+                    # Recherche par liste interdisant des noms invalides, mais encore un peu plus efficace
+                    # __ExcludeIndexes = numpy.asarray([__NameOfLocations.index(k) for k in __ExcludeLocations], dtype=int)
+                    #
+                # Ignore les noms absents
+                __ExcludeIndexes = numpy.compress(__ExcludeIndexes > -1, __ExcludeIndexes)
+                if len(__ExcludeIndexes) == 0:
+                    __ExcludeIndexes = ()
+            else:
+                raise ValueError(str(e))
+    # ----------
+    return __ExcludeIndexes
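
For illustration (hypothetical names, assuming an import from daCore.NumericObjects): an absent name is silently ignored and only the indexes of the known excluded names are returned.

from daCore.NumericObjects import FindIndexesFromNames
print(FindIndexesFromNames(["a1", "b2", "c3", "d4"], ["b2", "z9", "d4"]))   # [1 3]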
+
+# ==============================================================================
+def BuildComplexSampleList(
+        __SampleAsnUplet,
+        __SampleAsExplicitHyperCube,
+        __SampleAsMinMaxStepHyperCube,
+        __SampleAsMinMaxLatinHyperCube,
+        __SampleAsMinMaxSobolSequence,
+        __SampleAsIndependantRandomVariables,
+        __X0,
+        __Seed = None ):
+    # ---------------------------
+    if len(__SampleAsnUplet) > 0:
+        sampleList = __SampleAsnUplet
+        for i, Xx in enumerate(sampleList):
+            if numpy.ravel(Xx).size != __X0.size:
+                raise ValueError("The size %i of the %ith state X in the sample and %i of the checking point Xb are different, they have to be identical."%(numpy.ravel(Xx).size, i + 1, __X0.size))
+    # ---------------------------
+    elif len(__SampleAsExplicitHyperCube) > 0:
+        sampleList = itertools.product(*list(__SampleAsExplicitHyperCube))
+    # ---------------------------
+    elif len(__SampleAsMinMaxStepHyperCube) > 0:
+        coordinatesList = []
+        for i, dim in enumerate(__SampleAsMinMaxStepHyperCube):
+            if len(dim) != 3:
+                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be [min,max,step]."%(i, dim))
             else:
-                Un = numpy.ravel( U ).reshape((-1,1))
+                coordinatesList.append(numpy.linspace(dim[0], dim[1], 1 + int((float(dim[1]) - float(dim[0])) / float(dim[2]))))
+        sampleList = itertools.product(*coordinatesList)
+    # ---------------------------
+    elif len(__SampleAsMinMaxLatinHyperCube) > 0:
+        if vt(scipy.version.version) <= vt("1.7.0"):
+            __msg = "In order to use Latin Hypercube sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
+            warnings.warn(__msg, FutureWarning, stacklevel=50)
+            sampleList = []
         else:
-            Un = None
-        #
-        #--------------------------
-        if VariantM == "EnKS16-KalmanFilterFormula":
-            if selfA._parameters["EstimationOf"] == "State": # Forecast
-                EL = M( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
-                EZ = H( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                    EZ = EZ + Cm @ Un
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # --- > Par principe, M = Id, Q = 0
-                EZ = H( [(EL[:,i], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-            #
-            vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-            vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-            #
-            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
-            mS    = mS.reshape((-1,__m)) # Pour dimension 1
-            delta = RIdemi @ ( Ynpu - vZm )
-            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
-            vw    = mT @ mS.T @ delta
-            #
-            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
-            mU    = numpy.identity(__m)
-            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
-            #
-            EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
-            EL    = vEm + EX @ wTU
-            #
-            sEL[LagL] = EL
-            for irl in range(LagL): # Lissage des L précédentes analysis
-                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
-                sEL[irl] = vEm + EX @ wTU
-            #
-            # Conservation de l'analyse retrospective d'ordre 0 avant rotation
-            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-            if selfA._toStore("APosterioriCovariance"):
-                EXn = sEL[0]
-            #
-            for irl in range(LagL):
-                sEL[irl] = sEL[irl+1]
-            sEL[LagL] = None
-        #--------------------------
+            __spDesc = list(__SampleAsMinMaxLatinHyperCube)
+            __nbDime, __nbSamp  = map(int, __spDesc.pop())  # Réduction du dernier
+            __sample = scipy.stats.qmc.LatinHypercube(
+                d = len(__spDesc),
+                seed = numpy.random.default_rng(__Seed),
+            )
+            __sample = __sample.random(n = __nbSamp)
+            __bounds = numpy.array(__spDesc)[:, 0:2]
+            __l_bounds = __bounds[:, 0]
+            __u_bounds = __bounds[:, 1]
+            sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
+    # ---------------------------
+    elif len(__SampleAsMinMaxSobolSequence) > 0:
+        if vt(scipy.version.version) <= vt("1.7.0"):
+            __msg = "In order to use Latin Hypercube sampling, you must at least use Scipy version 1.7.0 (and you are presently using Scipy %s). A void sample is then generated."%scipy.version.version
+            warnings.warn(__msg, FutureWarning, stacklevel=50)
+            sampleList = []
         else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> avec analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
-    #
-    # Stockage des dernières analyses incomplètement remises à jour
-    for irl in range(LagL):
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    return 0
+            __spDesc = list(__SampleAsMinMaxSobolSequence)
+            __nbDime, __nbSamp  = map(int, __spDesc.pop())  # Réduction du dernier
+            if __nbDime != len(__spDesc):
+                warnings.warn("Declared space dimension (%i) is not equal to number of bounds (%i), the last one will be used."%(__nbDime, len(__spDesc)), FutureWarning, stacklevel=50)
+            __sample = scipy.stats.qmc.Sobol(
+                d = len(__spDesc),
+                seed = numpy.random.default_rng(__Seed),
+            )
+            __sample = __sample.random_base2(m = int(math.log2(__nbSamp)) + 1)
+            __bounds = numpy.array(__spDesc)[:, 0:2]
+            __l_bounds = __bounds[:, 0]
+            __u_bounds = __bounds[:, 1]
+            sampleList = scipy.stats.qmc.scale(__sample, __l_bounds, __u_bounds)
+    # ---------------------------
+    elif len(__SampleAsIndependantRandomVariables) > 0:
+        coordinatesList = []
+        for i, dim in enumerate(__SampleAsIndependantRandomVariables):
+            if len(dim) != 3:
+                raise ValueError("For dimension %i, the variable definition \"%s\" is incorrect, it should be ('distribution',(parameters),length) with distribution in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]."%(i, dim))
+            elif not ( str(dim[0]) in ['normal', 'lognormal', 'uniform', 'weibull'] \
+                       and hasattr(numpy.random, str(dim[0])) ):
+                raise ValueError("For dimension %i, the distribution name \"%s\" is not allowed, please choose in ['normal'(mean,std),'lognormal'(mean,sigma),'uniform'(low,high),'weibull'(shape)]"%(i, str(dim[0])))
+            else:
+                distribution = getattr(numpy.random, str(dim[0]), 'normal')
+                coordinatesList.append(distribution(*dim[1], size=max(1, int(dim[2]))))
+        sampleList = itertools.product(*coordinatesList)
+    else:
+        sampleList = iter([__X0,])
+    # ----------
+    return sampleList
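
A minimal sketch of the min/max/step hyper-cube case (hypothetical bounds, assuming an import from daCore.NumericObjects); the empty arguments select that branch:

import numpy
from daCore.NumericObjects import BuildComplexSampleList
grid = BuildComplexSampleList([], [], [[0., 1., 0.5], [10., 30., 10.]], [], [], [], numpy.zeros(2))
print(list(grid))   # 3 x 3 = 9 points, from (0.0, 10.0) to (1.0, 30.0)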
 
 # ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="KalmanFilterFormula",
-    Hybrid=None,
-    ):
+def multiXOsteps(
+        selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle,
+        __CovForecast = False ):
     """
-    Ensemble-Transform EnKF
+    Prévision multi-pas avec une correction par pas (multi-méthodes)
     """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Opérateurs
-    H = HO["Direct"].appliedControledFormTo
     #
+    # Initialisation
+    # --------------
     if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
+        if len(selfA.StoredVariables["Analysis"]) == 0 or not selfA._parameters["nextStep"]:
+            Xn = numpy.asarray(Xb)
+            if __CovForecast:
+                Pn = B
+            selfA.StoredVariables["Analysis"].store( Xn )
+            if selfA._toStore("APosterioriCovariance"):
+                if hasattr(B, "asfullmatrix"):
+                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
+                else:
+                    selfA.StoredVariables["APosterioriCovariance"].store( B )
+            selfA._setInternalState("seed", numpy.random.get_state())
+        elif selfA._parameters["nextStep"]:
+            Xn = selfA._getInternalState("Xn")
+            if __CovForecast:
+                Pn = selfA._getInternalState("Pn")
     else:
-        Cm = None
+        Xn = numpy.asarray(Xb)
+        if __CovForecast:
+            Pn = B
     #
-    # Durée d'observation et tailles
-    if hasattr(Y,"stepnumber"):
+    if hasattr(Y, "stepnumber"):
         duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
     else:
         duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Précalcul des inversions de B et R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    elif VariantM != "KalmanFilterFormula":
-        RI = R.getI()
-    if VariantM == "KalmanFilterFormula":
-        RIdemi = R.sqrtmI()
     #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+    # Multi-steps
+    # -----------
+    for step in range(duration - 1):
+        selfA.StoredVariables["CurrentStepNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        #
+        if hasattr(Y, "store"):
+            Ynpu = numpy.asarray( Y[step + 1] ).reshape((-1, 1))
         else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+            Ynpu = numpy.asarray( Y ).reshape((-1, 1))
         #
         if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            if hasattr(U, "store") and len(U) > 1:
+                Un = numpy.asarray( U[step] ).reshape((-1, 1))
+            elif hasattr(U, "store") and len(U) == 1:
+                Un = numpy.asarray( U[0] ).reshape((-1, 1))
             else:
-                Un = numpy.ravel( U ).reshape((-1,1))
+                Un = numpy.asarray( U ).reshape((-1, 1))
         else:
             Un = None
         #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+        # Predict (Time Update)
+        # ---------------------
+        if selfA._parameters["EstimationOf"] == "State":
+            if __CovForecast:
+                Mt = EM["Tangent"].asMatrix(Xn)
+                Mt = Mt.reshape(Xn.size, Xn.size)  # ADAO & check shape
+                Ma = EM["Adjoint"].asMatrix(Xn)
+                Ma = Ma.reshape(Xn.size, Xn.size)  # ADAO & check shape
+                Pn_predicted = Q + Mt @ (Pn @ Ma)
+            Mm = EM["Direct"].appliedControledFormTo
+            Xn_predicted = Mm( (Xn, Un) ).reshape((-1, 1))
+            if CM is not None and "Tangent" in CM and Un is not None:  # Attention : si Cm est aussi dans M, doublon !
+                Cm = CM["Tangent"].asMatrix(Xn_predicted)
+                Cm = Cm.reshape(Xn.size, Un.size)  # ADAO & check shape
+                Xn_predicted = Xn_predicted + (Cm @ Un).reshape((-1, 1))
+        elif selfA._parameters["EstimationOf"] == "Parameters":  # No forecast
             # --- > Par principe, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-        #
-        # Mean of forecast and observation of forecast
-        Xfm  = EnsembleMean( Xn_predicted )
-        Hfm  = EnsembleMean( HX_predicted )
-        #
-        # Anomalies
-        EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
-        EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
+            Xn_predicted = Xn
+            if __CovForecast:
+                Pn_predicted = Pn
+        Xn_predicted = numpy.asarray(Xn_predicted).reshape((-1, 1))
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if __CovForecast:
+            if hasattr(Pn_predicted, "asfullmatrix"):
+                Pn_predicted = Pn_predicted.asfullmatrix(Xn.size)
+            else:
+                Pn_predicted = numpy.asarray(Pn_predicted).reshape((Xn.size, Xn.size))
+            if selfA._toStore("ForecastCovariance"):
+                selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        #
+        # Correct (Measurement Update)
+        # ----------------------------
+        if __CovForecast:
+            oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, Pn_predicted, True)
+        else:
+            oneCycle(selfA, Xn_predicted, Ynpu, Un, HO, CM, R, B, True)
         #
-        #--------------------------
-        if VariantM == "KalmanFilterFormula":
-            mS    = RIdemi * EaHX / math.sqrt(__m-1)
-            mS    = mS.reshape((-1,__m)) # Pour dimension 1
-            delta = RIdemi * ( Ynpu - Hfm )
-            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
-            vw    = mT @ mS.T @ delta
-            #
-            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
-            mU    = numpy.identity(__m)
-            #
-            EaX   = EaX / math.sqrt(__m-1)
-            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
-        #--------------------------
-        elif VariantM == "Variational":
-            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * (__m-1) * w.T @ w
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GradJo = - EaHX.T @ (RI * _A)
-                _GradJb = (__m-1) * w.reshape((__m,1))
-                _GradJ  = _GradJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = (__m-1) * numpy.identity(__m)
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize11": # Jauge Boc2011
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GradJo = - EaHX.T @ (RI * _A)
-                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
-                _GradJ  = _GradJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = __m * \
-                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
-                / (1 + 1/__m + vw.T @ vw)**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize15": # Jauge Boc2015
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GradJo = - EaHX.T @ (RI * _A)
-                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
-                _GradJ  = _GradJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = (__m+1) * \
-                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
-                / (1 + 1/__m + vw.T @ vw)**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
-        #--------------------------
-        elif VariantM == "FiniteSize16": # Jauge Boc2016
-            HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
-            def CostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T @ (RI * _A)
-                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
-                _J  = _Jo + _Jb
-                return float(_J)
-            def GradientOfCostFunction(w):
-                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _GradJo = - EaHX.T @ (RI * _A)
-                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
-                _GradJ  = _GradJo + _GradJb
-                return numpy.ravel(_GradJ)
-            vw = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(__m),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                disp        = False,
-                )
-            #
-            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
-            Htb = ((__m+1) / (__m-1)) * \
-                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
-                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
-            Hta = Hto + Htb
-            #
-            Pta = numpy.linalg.inv( Hta )
-            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
-            #
-            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
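-# A minimal, self-contained sketch (illustrative only, not part of the ADAO
-# API) of the square-root analysis update used by the "KalmanFilterFormula"
-# variant above, assuming a diagonal R given as a 1-D vector of variances.
-def _etkf_analysis_sketch(Ef, HEf, y, r_var):
-    "Analysis ensemble from the forecast ensemble Ef (n x m) and HEf (p x m)."
-    m  = Ef.shape[1]
-    xf = Ef.mean(axis=1, keepdims=True)                  # forecast mean
-    yf = HEf.mean(axis=1, keepdims=True)                 # mean of the observed members
-    Xa = (Ef - xf) / math.sqrt(m - 1)                    # normalized state anomalies
-    S  = (HEf - yf) / (math.sqrt(m - 1) * numpy.sqrt(r_var)[:, None])
-    d  = (y.reshape((-1, 1)) - yf) / numpy.sqrt(r_var)[:, None]
-    T  = numpy.linalg.inv(numpy.identity(m) + S.T @ S)   # transform in ensemble space
-    w  = T @ S.T @ d                                     # weights for the mean update
-    Tdemi = numpy.real(scipy.linalg.sqrtm(T))            # square root for the anomalies
-    return xf + Xa @ (w + math.sqrt(m - 1) * Tdemi)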
-# ==============================================================================
-def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Extended Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
-        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
-            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
-            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplication!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By principle, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this is a duplication!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        Xa = Xn # Pointers
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
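-# A small dense-matrix sketch (illustrative only, not the ADAO API) of one
-# extended Kalman step as coded above: M and H are plain callables, Mt and Ht
-# their Jacobians evaluated at the relevant states, Q and R full matrices.
-def _ekf_step_sketch(x, P, y, M, Mt, H, Ht, Q, R):
-    xf = numpy.asarray(M(x)).reshape((-1, 1))            # forecast state
-    Pf = Mt @ P @ Mt.T + Q                               # forecast covariance
-    d  = y.reshape((-1, 1)) - numpy.asarray(H(xf)).reshape((-1, 1))
-    K  = Pf @ Ht.T @ numpy.linalg.inv(R + Ht @ Pf @ Ht.T)
-    xa = xf + K @ d                                      # analysis state
-    Pa = Pf - K @ Ht @ Pf                                # analysis covariance
-    return xa, Pa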
-# ==============================================================================
-def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
-    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
-    """
-    Iterative EnKF
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-    RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
-        else:                         Pn = B
-        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        #--------------------------
-        if VariantM == "IEnKF12":
-            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
-            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
-            __j = 0
-            Deltaw = 1
-            if not BnotT:
-                Ta  = numpy.identity(__m)
-            vw  = numpy.zeros(__m)
-            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
-                #
-                if BnotT:
-                    E1 = vx1 + _epsilon * EaX
-                else:
-                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
-                #
-                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
-                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
-                        argsAsSerie = True,
-                        returnSerieAsArrayMatrix = True )
-                elif selfA._parameters["EstimationOf"] == "Parameters":
-                    # --- > By principle, M = Id
-                    E2 = Xn
-                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-                vy1 = H((vx2, Un)).reshape((__p,1))
-                #
-                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-                #
-                if BnotT:
-                    EaY = (HE2 - vy2) / _epsilon
-                else:
-                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
-                #
-                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
-                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
-                Deltaw = - numpy.linalg.solve(mH,GradJ)
-                #
-                vw = vw + Deltaw
-                #
-                if not BnotT:
-                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                #
-                __j = __j + 1
-            #
-            A2 = EnsembleOfAnomalies( E2 )
-            #
-            if BnotT:
-                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
-            #
-            Xn = vx2 + A2
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( E2 )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( E2 - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # ----------------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
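-# Illustrative sketch (not the ADAO API) of a single Gauss-Newton step in
-# ensemble-weight space, as iterated by the "IEnKF12" variant above, written
-# for a diagonal R given as a 1-D vector of variances; d is the innovation
-# and EaY the ensemble of observed anomalies.
-def _ienkf_weight_step_sketch(w, d, EaY, r_var):
-    grad = w.reshape((-1, 1)) - EaY.T @ (d / r_var[:, None])
-    hess = numpy.identity(w.size) + EaY.T @ (EaY / r_var[:, None])
-    dw   = - numpy.linalg.solve(hess, grad.ravel())      # Newton increment on the weights
-    return w + dw, hess                                  # hess is reused for the transform Ta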
-# ==============================================================================
-def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Incremental 3DVAR
-    """
-    #
-    # Initializations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    #
-    BI = B.getI()
-    RI = R.getI()
-    #
-    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
-    Innovation = Y - HXb
-    #
-    # Outer Loop
-    # ----------
-    iOuter = 0
-    J      = 1./mpr
-    DeltaJ = 1./mpr
-    Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
-    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
-        #
-        # Inner Loop
-        # ----------
-        Ht = HO["Tangent"].asMatrix(Xr)
-        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
-        #
-        # Definition of the cost function
-        # ------------------------------
-        def CostFunction(dx):
-            _dX  = numpy.asarray(dx).reshape((-1,1))
-            if selfA._parameters["StoreInternalVariables"] or \
-                selfA._toStore("CurrentState") or \
-                selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
-            _HdX = (Ht @ _dX).reshape((-1,1))
-            _dInnovation = Innovation - _HdX
-            if selfA._toStore("SimulatedObservationAtCurrentState") or \
-                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
-            if selfA._toStore("InnovationAtCurrentState"):
-                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
-            #
-            Jb  = float( 0.5 * _dX.T * (BI * _dX) )
-            Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
-            J   = Jb + Jo
-            #
-            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            if selfA._toStore("IndexOfOptimum") or \
-                selfA._toStore("CurrentOptimum") or \
-                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-            return J
-        #
-        def GradientOfCostFunction(dx):
-            _dX          = numpy.ravel( dx )
-            _HdX         = (Ht @ _dX).reshape((-1,1))
-            _dInnovation = Innovation - _HdX
-            GradJb       = BI @ _dX
-            GradJo       = - Ht.T @ (RI * _dInnovation)
-            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-            return GradJ
-        #
-        # Minimization of the functional
-        # --------------------------------
-        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-        #
-        if selfA._parameters["Minimizer"] == "LBFGSB":
-            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
-            if "0.19" <= scipy.version.version <= "1.1.0":
-                import lbfgsbhlt as optimiseur
-            else:
-                import scipy.optimize as optimiseur
-            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-                func        = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-                iprint      = selfA._parameters["optiprint"],
-                )
-            nfeval = Informations['funcalls']
-            rc     = Informations['warnflag']
-        elif selfA._parameters["Minimizer"] == "TNC":
-            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-                func        = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-                ftol        = selfA._parameters["CostDecrementTolerance"],
-                messages    = selfA._parameters["optmessages"],
-                )
-        elif selfA._parameters["Minimizer"] == "CG":
-            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                gtol        = selfA._parameters["GradientNormTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        elif selfA._parameters["Minimizer"] == "NCG":
-            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                avextol     = selfA._parameters["CostDecrementTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        elif selfA._parameters["Minimizer"] == "BFGS":
-            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-                f           = CostFunction,
-                x0          = numpy.zeros(Xb.size),
-                fprime      = GradientOfCostFunction,
-                args        = (),
-                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-                gtol        = selfA._parameters["GradientNormTolerance"],
-                disp        = selfA._parameters["optdisp"],
-                full_output = True,
-                )
-        else:
-            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-        #
-        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-        #
-        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-        else:
-            Minimum = Xb + Minimum.reshape((-1,1))
-        #
-        Xr     = Minimum
-        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
-        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
-    #
-    Xa = Xr
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional computations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
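-# Minimal sketch (illustrative only, not the ADAO API) of the quadratic
-# inner-loop problem solved at each outer iteration above, with a generic
-# SciPy minimizer instead of the selectable ones: find the increment dx
-# minimizing J(dx) = 1/2 dx.T BI dx + 1/2 (d - Ht dx).T RI (d - Ht dx),
-# where d is the outer-loop innovation, Ht the tangent linear of H, and
-# BI, RI the dense inverses of B and R.
-def _incremental_inner_sketch(d, Ht, BI, RI):
-    import scipy.optimize
-    def J(dx):
-        r = numpy.ravel(d) - Ht @ dx
-        return 0.5 * dx @ (BI @ dx) + 0.5 * r @ (RI @ r)
-    def GradJ(dx):
-        r = numpy.ravel(d) - Ht @ dx
-        return BI @ dx - Ht.T @ (RI @ r)
-    return scipy.optimize.minimize(J, numpy.zeros(Ht.shape[1]), jac=GradJ).x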
-# ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
-    Hybrid=None,
-    ):
-    """
-    Maximum Likelihood Ensemble Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precompute the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-    RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this is a duplication!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By principle, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-        #
-        #--------------------------
-        if VariantM == "MLEF13":
-            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
-            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
-            Ua  = numpy.identity(__m)
-            __j = 0
-            Deltaw = 1
-            if not BnotT:
-                Ta  = numpy.identity(__m)
-            vw  = numpy.zeros(__m)
-            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
-                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
-                #
-                if BnotT:
-                    E1 = vx1 + _epsilon * EaX
-                else:
-                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
-                #
-                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
-                    argsAsSerie = True,
-                    returnSerieAsArrayMatrix = True )
-                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-                #
-                if BnotT:
-                    EaY = (HE2 - vy2) / _epsilon
-                else:
-                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
-                #
-                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
-                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
-                Deltaw = - numpy.linalg.solve(mH,GradJ)
-                #
-                vw = vw + Deltaw
-                #
-                if not BnotT:
-                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-                #
-                __j = __j + 1
-            #
-            if BnotT:
-                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
-            #
-            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
-# ==============================================================================
-def mmqr(
-        func     = None,
-        x0       = None,
-        fprime   = None,
-        bounds   = None,
-        quantile = 0.5,
-        maxfun   = 15000,
-        toler    = 1.e-06,
-        y        = None,
-        ):
-    """
-    Computer implementation of the MMQR algorithm, based on the publication:
-    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
-    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
-    """
-    #
-    # Retrieval of the initial data and information
-    # ----------------------------------------------
-    variables = numpy.ravel( x0 )
-    mesures   = numpy.ravel( y )
-    increment = sys.float_info[0]
-    p         = variables.size
-    n         = mesures.size
-    quantile  = float(quantile)
-    #
-    # Computation of the MM parameters
-    # --------------------------------
-    tn      = float(toler) / n
-    e0      = -tn / math.log(tn)
-    epsilon = (e0-tn)/(1+math.log(e0))
-    #
-    # Initialization computations
-    # ----------------------------
-    residus  = mesures - numpy.ravel( func( variables ) )
-    poids    = 1./(epsilon+numpy.abs(residus))
-    veps     = 1. - 2. * quantile - residus * poids
-    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
-    iteration = 0
-    #
-    # Iterative search
-    # ----------------
-    while (increment > toler) and (iteration < maxfun) :
-        iteration += 1
-        #
-        Derivees  = numpy.array(fprime(variables))
-        Derivees  = Derivees.reshape(n,p) # ADAO & check shape
-        DeriveesT = Derivees.transpose()
-        M         = numpy.dot( DeriveesT , poids.reshape((n,1)) * Derivees )
-        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
-        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
-        #
-        variables = variables + step
-        if bounds is not None:
-            # Caution: avoid an infinite loop if an interval is too small
-            while( (variables < numpy.ravel(numpy.asarray(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asarray(bounds)[:,1])).any() ):
-                step      = step/2.
-                variables = variables - step
-        residus   = mesures - numpy.ravel( func(variables) )
-        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
-        #
-        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
-            step      = step/2.
-            variables = variables - step
-            residus   = mesures - numpy.ravel( func(variables) )
-            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
-        #
-        increment     = lastsurrogate-surrogate
-        poids         = 1./(epsilon+numpy.abs(residus))
-        veps          = 1. - 2. * quantile - residus * poids
-        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
-    #
-    # Discrepancy measure
-    # -------------------
-    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
-    #
-    return variables, Ecart, [n,p,iteration,increment,0]
-
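The final "Ecart" above is the quantile (pinball) loss summed over the residuals. A minimal self-contained sketch on toy values (assumed data, not part of the module), checking that the closed form used above matches an explicit pinball-loss sum:

    import numpy

    quantile = 0.3
    residus  = numpy.array([0.5, -0.2, 1.1, -0.7, 0.0])

    # Closed form, as used for "Ecart" above
    ecart = quantile * numpy.sum(residus) - numpy.sum(residus[residus < 0])

    # Explicit sum of pinball (check) losses rho_tau(r) = r * (tau - 1_{r<0})
    pinball = numpy.where(residus >= 0, quantile * residus, (quantile - 1.) * residus)
    assert abs(ecart - numpy.sum(pinball)) < 1.e-12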
-# ==============================================================================
-def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
-    """
-    Multi-step and multi-method 3DVAR
-    """
-    #
-    # Initialisation
-    # --------------
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-        if CM is not None and "Tangent" in CM and U is not None:
-            Cm = CM["Tangent"].asMatrix(Xb)
-        else:
-            Cm = None
-        #
-        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-            Xn = numpy.ravel(Xb).reshape((-1,1))
-            selfA.StoredVariables["Analysis"].store( Xn )
-            if selfA._toStore("APosterioriCovariance"):
-                if hasattr(B,"asfullmatrix"):
-                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
-                else:
-                    selfA.StoredVariables["APosterioriCovariance"].store( B )
-            if selfA._toStore("ForecastState"):
-                selfA.StoredVariables["ForecastState"].store( Xn )
-        elif selfA._parameters["nextStep"]:
-            Xn = selfA._getInternalState("Xn")
-    else:
-        Xn = numpy.ravel(Xb).reshape((-1,1))
-    #
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-    else:
-        duration = 2
-    #
-    # Multi-step
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((-1,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast
-            Xn_predicted = M( (Xn, Un) )
-            if selfA._toStore("ForecastState"):
-                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
-                Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
-            # --- > By design, M = Id, Q = 0
-            Xn_predicted = Xn
-        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
-        #
-        oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
-        #
-        Xn = selfA.StoredVariables["Analysis"][-1]
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-    #
-    return 0
-
-# ==============================================================================
-def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR PSAS
-    """
-    #
-    # Initialisations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    #
-    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
-    else:
-        HXb = numpy.asarray(Hm( Xb ))
-    HXb = numpy.ravel( HXb ).reshape((-1,1))
-    if Y.size != HXb.size:
-        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-    if max(Y.shape) != max(HXb.shape):
-        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-    #
-    if selfA._toStore("JacobianMatrixAtBackground"):
-        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
-        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
-        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
-    #
-    Ht = HO["Tangent"].asMatrix(Xb)
-    BHT = B * Ht.T
-    HBHTpR = R + Ht * BHT
-    Innovation = Y - HXb
-    #
-    Xini = numpy.zeros(Y.size)
-    #
-    # Definition of the cost function
-    # -------------------------------
-    def CostFunction(w):
-        _W = numpy.asarray(w).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
-        #
-        Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
-        Jo  = float( - _W.T @ Innovation )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(w):
-        _W = numpy.asarray(w).reshape((-1,1))
-        GradJb  = HBHTpR @ _W
-        GradJo  = - Innovation
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the cost function
-    # ---------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Correction to work around a TNC bug on the returned Minimum
-    # ------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    else:
-        Minimum = Xb + BHT @ Minimum.reshape((-1,1))
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        BI = B.getI()
-        RI = R.getI()
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
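PSAS works in observation space: the control variable w has the size of Y, and the analysis is recovered as Xa = Xb + B Ht w. A minimal sketch on assumed toy matrices (not part of the module), checking that the minimizer of the dual cost above yields the same analysis as the state-space (primal) 3DVAR solution for a linear observation operator:

    import numpy

    # Assumed toy problem: 3 state variables, 2 observations
    H  = numpy.array([[1., 0., 0.], [0., 1., 1.]])
    B  = numpy.diag([1.0, 2.0, 0.5])
    R  = numpy.diag([0.1, 0.2])
    Xb = numpy.array([1., 0., 3.])
    Y  = numpy.array([1.5, 2.0])
    d  = Y - H @ Xb                       # innovation

    # Dual (PSAS): w minimizes 0.5 * w.T (H B H.T + R) w - w.T d
    w = numpy.linalg.solve(H @ B @ H.T + R, d)
    Xa_dual = Xb + B @ H.T @ w

    # Primal 3DVAR: Xa = Xb + (B^-1 + H.T R^-1 H)^-1 H.T R^-1 d
    Hess = numpy.linalg.inv(B) + H.T @ numpy.linalg.inv(R) @ H
    Xa_primal = Xb + numpy.linalg.solve(Hess, H.T @ numpy.linalg.inv(R) @ d)

    assert numpy.allclose(Xa_dual, Xa_primal)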
-# ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
-    VariantM="KalmanFilterFormula16",
-    Hybrid=None,
-    ):
-    """
-    Stochastic EnKF
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    H = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    __m = selfA._parameters["NumberOfMembers"]
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    previousJMinimum = numpy.finfo(float).max
-    #
-    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
-    else:                         Rn = R
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
-        else:                         Pn = B
-        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-    #
-    for step in range(duration-1):
-        numpy.random.set_state(selfA._getInternalState("seed"))
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is applied twice!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By design, M = Id, Q = 0
-            Xn_predicted = EMX = Xn
-            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
-                argsAsSerie = True,
-                returnSerieAsArrayMatrix = True )
-        #
-        # Mean of forecast and observation of forecast
-        Xfm  = EnsembleMean( Xn_predicted )
-        Hfm  = EnsembleMean( HX_predicted )
-        #
-        #--------------------------
-        if VariantM == "KalmanFilterFormula05":
-            PfHT, HPfHT = 0., 0.
-            for i in range(__m):
-                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
-                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
-                PfHT  += Exfi * Eyfi.T
-                HPfHT += Eyfi * Eyfi.T
-            PfHT  = (1./(__m-1)) * PfHT
-            HPfHT = (1./(__m-1)) * HPfHT
-            Kn     = PfHT * ( R + HPfHT ).I
-            del PfHT, HPfHT
-            #
-            for i in range(__m):
-                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
-                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
-        #--------------------------
-        elif VariantM == "KalmanFilterFormula16":
-            EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
-            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
-            #
-            EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
-            EaY = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
-            #
-            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T)
-            #
-            for i in range(__m):
-                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
-        #--------------------------
-        else:
-            raise ValueError("VariantM has to be chosen in the authorized methods list.")
-        #
-        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
-            Xn = CovarianceInflation( Xn,
-                selfA._parameters["InflationType"],
-                selfA._parameters["InflationFactor"],
-                )
-        #
-        if Hybrid == "E3DVAR":
-            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
-            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
-        #
-        Xa = EnsembleMean( Xn )
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("seed", numpy.random.get_state())
-        #--------------------------
-        #
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("APosterioriCovariance") \
-            or selfA._toStore("InnovationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
-            _Innovation = Ynpu - _HXa
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( EMX )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( EMX - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> other quantities
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-        # ---> For the smoothers
-        if selfA._toStore("CurrentEnsembleState"):
-            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
-    #
-    # Additional final storage of the optimum in parameter estimation
-    # ----------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
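Both variants above build the gain from (1/(m-1))-normalized ensemble anomaly products. A minimal sketch on a toy random ensemble (assumed data, not part of the module), showing that this anomaly-based estimate is the usual sample covariance:

    import numpy

    m  = 5                                     # ensemble size
    E  = numpy.random.standard_normal((3, m))  # 3 state variables, m members
    Ea = E - E.mean(axis=1, keepdims=True)     # ensemble anomalies

    Pf = (Ea @ Ea.T) / (m - 1)                 # anomaly-based covariance estimate
    assert numpy.allclose(Pf, numpy.cov(E, ddof=1))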
-# ==============================================================================
-def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR
-    """
-    #
-    # Initialisations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    Ha = HO["Adjoint"].appliedInXTo
-    #
-    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
-    else:
-        HXb = numpy.asarray(Hm( Xb ))
-    HXb = HXb.reshape((-1,1))
-    if Y.size != HXb.size:
-        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
-    if max(Y.shape) != max(HXb.shape):
-        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
-    #
-    if selfA._toStore("JacobianMatrixAtBackground"):
-        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
-        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
-        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
-    #
-    BI = B.getI()
-    RI = R.getI()
-    #
-    Xini = selfA._parameters["InitializationPoint"]
-    #
-    # Definition of the cost function
-    # -------------------------------
-    def CostFunction(x):
-        _X  = numpy.asarray(x).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
-        _Innovation = Y - _HX
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        #
-        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
-        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(x):
-        _X      = numpy.asarray(x).reshape((-1,1))
-        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
-        GradJb  = BI * (_X - Xb)
-        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the cost function
-    # ---------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Correction to work around a TNC bug on the returned Minimum
-    # ------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
-    #
-    return 0
-
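In std3dvar the gradient combines BI (x - Xb) with the adjoint of H applied to RI (Y - H(x)). A minimal self-contained sketch on a toy linear operator (assumed data, not part of the module), checking this analytic gradient against a central finite difference of the same cost:

    import numpy

    H  = numpy.array([[1., 0.], [1., 1.]])     # toy linear observation operator
    BI = numpy.diag([1., 2.])                  # B^-1
    RI = numpy.diag([4., 1.])                  # R^-1
    Xb = numpy.array([0.5, -1.0])
    Y  = numpy.array([1.0, 0.0])

    def J(x):
        dxb, inn = x - Xb, Y - H @ x
        return 0.5 * dxb @ BI @ dxb + 0.5 * inn @ RI @ inn

    def gradJ(x):
        return BI @ (x - Xb) - H.T @ (RI @ (Y - H @ x))

    x0, eps = numpy.array([0.2, 0.3]), 1.e-6
    fd = numpy.array([(J(x0 + eps * numpy.eye(2)[i]) - J(x0 - eps * numpy.eye(2)[i])) / (2. * eps) for i in range(2)])
    assert numpy.allclose(fd, gradJ(x0), atol=1.e-5)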
-# ==============================================================================
-def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    4DVAR
-    """
-    #
-    # Initialisations
-    # ---------------
-    #
-    # Operators
-    Hm = HO["Direct"].appliedControledFormTo
-    Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    def Un(_step):
-        if U is not None:
-            if hasattr(U,"store") and 1<=_step<len(U) :
-                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                _Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                _Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            _Un = None
-        return _Un
-    def CmUn(_xn,_un):
-        if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is applied twice!
-            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
-            _CmUn = (_Cm @ _un).reshape((-1,1))
-        else:
-            _CmUn = 0.
-        return _CmUn
-    #
-    # Note: observations are used starting from time step number 1, and are
-    # arranged in Yo according to these indices. Step 0 is therefore not
-    # used, since the first stage starts with the observation of step 1.
-    #
-    # Number of steps identical to the number of observation steps
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-    else:
-        duration = 2
-    #
-    # Precomputation of the inverses of B and R
-    BI = B.getI()
-    RI = R.getI()
-    #
-    # Starting point of the optimization
-    Xini = selfA._parameters["InitializationPoint"]
-    #
-    # Definition of the cost function
-    # -------------------------------
-    selfA.DirectCalculation = [None,] # Step 0 is not observed
-    selfA.DirectInnovation  = [None,] # Step 0 is not observed
-    def CostFunction(x):
-        _X  = numpy.asarray(x).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
-        selfA.DirectCalculation = [None,]
-        selfA.DirectInnovation  = [None,]
-        Jo  = 0.
-        _Xn = _X
-        for step in range(0,duration-1):
-            if hasattr(Y,"store"):
-                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
-            else:
-                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
-            _Un = Un(step)
-            #
-            # Evolution step
-            if selfA._parameters["EstimationOf"] == "State":
-                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                pass
-            #
-            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
-            #
-            # Difference-to-observations step
-            if selfA._parameters["EstimationOf"] == "State":
-                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
-            #
-            # Storage of the state
-            selfA.DirectCalculation.append( _Xn )
-            selfA.DirectInnovation.append( _YmHMX )
-            #
-            # Addition to the observation cost term
-            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
-        J = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(x):
-        _X      = numpy.asarray(x).reshape((-1,1))
-        GradJb  = BI * (_X - Xb)
-        GradJo  = 0.
-        for step in range(duration-1,0,-1):
-            # Retrieve the last stored evolution state
-            _Xn = selfA.DirectCalculation.pop()
-            # Retrieve the last stored innovation
-            _YmHMX = selfA.DirectInnovation.pop()
-            # Computation of the adjoints
-            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
-            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
-            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
-            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
-            # Gradient computation by adjoint state
-            GradJo = GradJo + Ha * (RI * _YmHMX) # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
-            GradJo = Ma * GradJo                 # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
-        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the cost function
-    # ---------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = selfA._parameters["Bounds"],
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Correction to work around a TNC bug on the returned Minimum
-    # ------------------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    #
-    # Obtaining the analysis
-    # ----------------------
-    Xa = Minimum
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    # Additional calculations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    #
-    return 0
-
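In std4dvar the gradient is accumulated backwards through the adjoints Ha and Ma of the observation and evolution operators. A minimal sketch of the same backward sweep on a two-step toy linear model (assumed data; for a linear model the adjoints do not depend on the stored states), checked against a central finite difference of the 4D cost:

    import numpy

    M  = numpy.array([[0.9, 0.1], [0.0, 0.8]])       # toy linear evolution operator
    H  = numpy.array([[1., 0.]])                     # toy linear observation operator
    BI = numpy.eye(2)                                # B^-1
    RI = numpy.array([[2.]])                         # R^-1
    Xb = numpy.array([1.0, 0.5])
    Yobs = [numpy.array([0.7]), numpy.array([0.4])]  # observations at steps 1 and 2

    def J(x0):
        x, Jo = x0, 0.
        for y in Yobs:
            x = M @ x
            d = y - H @ x
            Jo += 0.5 * d @ RI @ d
        return 0.5 * (x0 - Xb) @ BI @ (x0 - Xb) + Jo

    def gradJ(x0):
        # Forward pass: store the innovations, as DirectInnovation does above
        innovations, x = [], x0
        for y in Yobs:
            x = M @ x
            innovations.append(y - H @ x)
        # Backward adjoint sweep, as in GradientOfCostFunction above
        g = numpy.zeros_like(x0)
        for d in reversed(innovations):
            g = M.T @ (g + H.T @ (RI @ d))
        return BI @ (x0 - Xb) - g

    x0, eps = numpy.array([0.3, -0.2]), 1.e-6
    fd = numpy.array([(J(x0 + eps * numpy.eye(2)[i]) - J(x0 - eps * numpy.eye(2)[i])) / (2. * eps) for i in range(2)])
    assert numpy.allclose(fd, gradJ(x0), atol=1.e-5)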
-# ==============================================================================
-def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Standard Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    # Operators
-    # ---------
-    Ht = HO["Tangent"].asMatrix(Xb)
-    Ha = HO["Adjoint"].asMatrix(Xb)
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        Mt = EM["Tangent"].asMatrix(Xb)
-        Ma = EM["Adjoint"].asMatrix(Xb)
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
-        selfA._setInternalState("seed", numpy.random.get_state())
-    elif selfA._parameters["nextStep"]:
-        Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = Mt @ Xn
-            if Cm is not None and Un is not None: # Warning: if Cm is also in M, it is applied twice!
-                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm @ Un
-            Pn_predicted = Q + Mt * (Pn * Ma)
-        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
-            # --- > By principle, M = Id, Q = 0
-            Xn_predicted = Xn
-            Pn_predicted = Pn
-        #
-        if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = Ht @ Xn_predicted
-            _Innovation  = Ynpu - HX_predicted
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = Ht @ Xn_predicted
-            _Innovation  = Ynpu - HX_predicted
-            if Cm is not None and Un is not None: # Warning: if Cm is also in H, it is applied twice!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
-        Xn = Xn_predicted + Kn * _Innovation
-        Pn = Pn_predicted - Kn * Ht * Pn_predicted
-        #
-        Xa = Xn # Pointers
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # -----------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
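The forecast/analysis pair implemented by stdkf is the textbook linear Kalman recursion. As a compact stand-alone illustration, here is a hypothetical helper working on dense NumPy arrays, without the ADAO covariance objects and storage machinery:

    import numpy

    def kalman_step(x, P, y, M, H, Q, R):
        # Forecast: propagate the state and its covariance with the linear model M
        xf = M @ x
        Pf = M @ P @ M.T + Q
        # Analysis: Kalman gain, innovation, updated state and covariance
        K  = Pf @ H.T @ numpy.linalg.inv(H @ Pf @ H.T + R)
        d  = y - H @ xf
        xa = xf + K @ d
        Pa = Pf - K @ H @ Pf
        return xa, Pa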
-# ==============================================================================
-def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    Unscented Kalman Filter
-    """
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA._parameters["StoreInternalVariables"] = True
-    #
-    L     = Xb.size
-    Alpha = selfA._parameters["Alpha"]
-    Beta  = selfA._parameters["Beta"]
-    if selfA._parameters["Kappa"] == 0:
-        if selfA._parameters["EstimationOf"] == "State":
-            Kappa = 0
-        elif selfA._parameters["EstimationOf"] == "Parameters":
-            Kappa = 3 - L
-    else:
-        Kappa = selfA._parameters["Kappa"]
-    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
-    Gamma  = math.sqrt( L + Lambda )
-    #
-    Ww = []
-    Ww.append( 0. )
-    for i in range(2*L):
-        Ww.append( 1. / (2.*(L + Lambda)) )
-    #
-    Wm = numpy.array( Ww )
-    Wm[0] = Lambda / (L + Lambda)
-    Wc = numpy.array( Ww )
-    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
-    #
-    # Operators
-    Hm = HO["Direct"].appliedControledFormTo
-    #
-    if selfA._parameters["EstimationOf"] == "State":
-        Mm = EM["Direct"].appliedControledFormTo
-    #
-    if CM is not None and "Tangent" in CM and U is not None:
-        Cm = CM["Tangent"].asMatrix(Xb)
-    else:
-        Cm = None
-    #
-    # Observation duration and sizes
-    if hasattr(Y,"stepnumber"):
-        duration = Y.stepnumber()
-        __p = numpy.cumprod(Y.shape())[-1]
-    else:
-        duration = 2
-        __p = numpy.array(Y).size
-    #
-    # Precomputation of the inverses of B and R
-    if selfA._parameters["StoreInternalVariables"] \
-        or selfA._toStore("CostFunctionJ") \
-        or selfA._toStore("CostFunctionJb") \
-        or selfA._toStore("CostFunctionJo") \
-        or selfA._toStore("CurrentOptimum") \
-        or selfA._toStore("APosterioriCovariance"):
-        BI = B.getI()
-        RI = R.getI()
-    #
-    __n = Xb.size
-    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
-    #
-    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = Xb
-        if hasattr(B,"asfullmatrix"):
-            Pn = B.asfullmatrix(__n)
-        else:
-            Pn = B
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( Xb )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-    elif selfA._parameters["nextStep"]:
+        # --------------------------
         Xn = selfA._getInternalState("Xn")
-        Pn = selfA._getInternalState("Pn")
-    #
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        XaMin            = Xn
-        previousJMinimum = numpy.finfo(float).max
-    #
-    for step in range(duration-1):
-        if hasattr(Y,"store"):
-            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
-        else:
-            Ynpu = numpy.ravel( Y ).reshape((__p,1))
-        #
-        if U is not None:
-            if hasattr(U,"store") and len(U)>1:
-                Un = numpy.ravel( U[step] ).reshape((-1,1))
-            elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.ravel( U[0] ).reshape((-1,1))
-            else:
-                Un = numpy.ravel( U ).reshape((-1,1))
-        else:
-            Un = None
-        #
-        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
-        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
-        nbSpts = 2*Xn.size+1
-        #
-        XEtnnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
-                if Cm is not None and Un is not None: # Warning: if Cm is also in M, it is applied twice!
-                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm @ Un
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                # --- > By principle, M = Id, Q = 0
-                XEtnnpi = Xnp[:,point]
-            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
-        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
-        #
-        Xncm = ( XEtnnp * Wm ).sum(axis=1)
-        #
-        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
-        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
-        for point in range(nbSpts):
-            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
-        #
-        Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
-        #
-        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
-        #
-        Ynnp = []
-        for point in range(nbSpts):
-            if selfA._parameters["EstimationOf"] == "State":
-                Ynnpi = Hm( (Xnnp[:,point], None) )
-            elif selfA._parameters["EstimationOf"] == "Parameters":
-                Ynnpi = Hm( (Xnnp[:,point], Un) )
-            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
-        Ynnp = numpy.concatenate( Ynnp, axis=1 )
-        #
-        Yncm = ( Ynnp * Wm ).sum(axis=1)
-        #
-        Pyyn = R
-        Pxyn = 0.
-        for point in range(nbSpts):
-            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
-        #
-        _Innovation  = Ynpu - Yncm.reshape((-1,1))
-        if selfA._parameters["EstimationOf"] == "Parameters":
-            if Cm is not None and Un is not None: # Warning: if Cm is also in H, it is applied twice!
-                _Innovation = _Innovation - Cm @ Un
-        #
-        Kn = Pxyn * Pyyn.I
-        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
-        Pn = Pnm - Kn * Pyyn * Kn.T
-        #
-        Xa = Xn # Pointers
-        #--------------------------
-        selfA._setInternalState("Xn", Xn)
-        selfA._setInternalState("Pn", Pn)
-        #--------------------------
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        # ---> with analysis
-        selfA.StoredVariables["Analysis"].store( Xa )
-        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
-        if selfA._toStore("InnovationAtCurrentAnalysis"):
-            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
-        # ---> with current state
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CurrentState"):
-            selfA.StoredVariables["CurrentState"].store( Xn )
-        if selfA._toStore("ForecastState"):
-            selfA.StoredVariables["ForecastState"].store( Xncm )
-        if selfA._toStore("ForecastCovariance"):
-            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( Xncm - Xa )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        if selfA._toStore("SimulatedObservationAtCurrentState") \
-            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
-        # ---> others
-        if selfA._parameters["StoreInternalVariables"] \
-            or selfA._toStore("CostFunctionJ") \
-            or selfA._toStore("CostFunctionJb") \
-            or selfA._toStore("CostFunctionJo") \
-            or selfA._toStore("CurrentOptimum") \
-            or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
-            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-            J   = Jb + Jo
-            selfA.StoredVariables["CostFunctionJb"].store( Jb )
-            selfA.StoredVariables["CostFunctionJo"].store( Jo )
-            selfA.StoredVariables["CostFunctionJ" ].store( J )
-            #
-            if selfA._toStore("IndexOfOptimum") \
-                or selfA._toStore("CurrentOptimum") \
-                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
-                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
-                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-            if selfA._toStore("IndexOfOptimum"):
-                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-            if selfA._toStore("CurrentOptimum"):
-                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
-            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
-            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
-        if selfA._parameters["EstimationOf"] == "Parameters" \
-            and J < previousJMinimum:
-            previousJMinimum    = J
-            XaMin               = Xa
-            if selfA._toStore("APosterioriCovariance"):
-                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
-    #
-    # Additional final storage of the optimum for parameter estimation
-    # -----------------------------------------------------------------
-    if selfA._parameters["EstimationOf"] == "Parameters":
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
-        selfA.StoredVariables["Analysis"].store( XaMin )
-        if selfA._toStore("APosterioriCovariance"):
-            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
-        if selfA._toStore("BMA"):
-            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
-    #
-    return 0
-
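The sigma points and weights built at the top of uskf follow the scaled unscented transform (the Lambda, Gamma, Wm and Wc formulas above). A minimal stand-alone version of that construction, with a hypothetical helper name, illustrative default values and dense NumPy arrays, would be:

    import math
    import numpy
    import scipy.linalg

    def sigma_points_and_weights(x, P, alpha=1.e-2, beta=2., kappa=0.):
        # Scaled unscented transform: 2L+1 sigma points around the mean x, covariance P
        L     = x.size
        lam   = alpha**2 * (L + kappa) - L
        gamma = math.sqrt(L + lam)
        S     = numpy.real(scipy.linalg.sqrtm(P))             # matrix square root of P
        xc    = x.reshape((-1, 1))
        X     = numpy.hstack([xc, xc + gamma * S, xc - gamma * S])
        Wm    = numpy.full(2 * L + 1, 1. / (2. * (L + lam)))  # weights for the mean
        Wc    = Wm.copy()                                      # weights for the covariance
        Wm[0] = lam / (L + lam)
        Wc[0] = lam / (L + lam) + (1. - alpha**2 + beta)
        return X, Wm, Wc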
-# ==============================================================================
-def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
-    """
-    3DVAR variational analysis with no inversion of B
-    """
-    #
-    # Initializations
-    # ---------------
-    Hm = HO["Direct"].appliedTo
-    Ha = HO["Adjoint"].appliedInXTo
-    #
-    BT = B.getT()
-    RI = R.getI()
-    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1)) # Background simulation, needed by the final diagnostics
-    #
-    Xini = numpy.zeros(Xb.size)
-    #
-    # Definition of the cost function
-    # -------------------------------
-    def CostFunction(v):
-        _V = numpy.asarray(v).reshape((-1,1))
-        _X = Xb + (B @ _V).reshape((-1,1))
-        if selfA._parameters["StoreInternalVariables"] or \
-            selfA._toStore("CurrentState") or \
-            selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
-        _Innovation = Y - _HX
-        if selfA._toStore("SimulatedObservationAtCurrentState") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
-        if selfA._toStore("InnovationAtCurrentState"):
-            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
-        #
-        Jb  = float( 0.5 * _V.T * (BT * _V) )
-        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
-        J   = Jb + Jo
-        #
-        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
-        selfA.StoredVariables["CostFunctionJb"].store( Jb )
-        selfA.StoredVariables["CostFunctionJo"].store( Jo )
-        selfA.StoredVariables["CostFunctionJ" ].store( J )
-        if selfA._toStore("IndexOfOptimum") or \
-            selfA._toStore("CurrentOptimum") or \
-            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
-            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
-            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-        if selfA._toStore("IndexOfOptimum"):
-            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
-        if selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
-        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
-        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
-        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
-        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
-            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
-        return J
-    #
-    def GradientOfCostFunction(v):
-        _V = numpy.asarray(v).reshape((-1,1))
-        _X = Xb + (B @ _V).reshape((-1,1))
-        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
-        GradJb  = BT * _V
-        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
-        return GradJ
-    #
-    # Minimization of the functional
-    # ------------------------------
-    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
-    #
-    if selfA._parameters["Minimizer"] == "LBFGSB":
-        if "0.19" <= scipy.version.version <= "1.1.0":
-            import lbfgsbhlt as optimiseur
-        else:
-            import scipy.optimize as optimiseur
-        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
-            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            iprint      = selfA._parameters["optiprint"],
-            )
-        nfeval = Informations['funcalls']
-        rc     = Informations['warnflag']
-    elif selfA._parameters["Minimizer"] == "TNC":
-        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
-            func        = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
-            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
-            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
-            ftol        = selfA._parameters["CostDecrementTolerance"],
-            messages    = selfA._parameters["optmessages"],
-            )
-    elif selfA._parameters["Minimizer"] == "CG":
-        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "NCG":
-        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            avextol     = selfA._parameters["CostDecrementTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    elif selfA._parameters["Minimizer"] == "BFGS":
-        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
-            f           = CostFunction,
-            x0          = Xini,
-            fprime      = GradientOfCostFunction,
-            args        = (),
-            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
-            gtol        = selfA._parameters["GradientNormTolerance"],
-            disp        = selfA._parameters["optdisp"],
-            full_output = True,
-            )
-    else:
-        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
-    #
-    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
-    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
-    #
-    # Workaround for a TNC bug in the returned Minimum
-    # -------------------------------------------------
-    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
-        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-    else:
-        Minimum = Xb + B * Minimum.reshape((-1,1)) # Deliberately "*", not "@"
-    #
-    Xa = Minimum
-    #--------------------------
-    #
-    selfA.StoredVariables["Analysis"].store( Xa )
-    #
-    if selfA._toStore("OMA") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("SimulatedObservationAtOptimum"):
-        if selfA._toStore("SimulatedObservationAtCurrentState"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
-        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
-        else:
-            HXa = Hm( Xa )
-    #
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("JacobianMatrixAtOptimum") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
-        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles") or \
-        selfA._toStore("KalmanGainAtOptimum"):
-        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
-        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
-    if selfA._toStore("APosterioriCovariance") or \
-        selfA._toStore("SimulationQuantiles"):
-        BI = B.getI()
-        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
-    if selfA._toStore("APosterioriCovariance"):
-        selfA.StoredVariables["APosterioriCovariance"].store( A )
-    if selfA._toStore("JacobianMatrixAtOptimum"):
-        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
-    if selfA._toStore("KalmanGainAtOptimum"):
-        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
-        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
-        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
-    #
-    # Additional computations and/or storage
-    # ---------------------------------------
-    if selfA._toStore("Innovation") or \
-        selfA._toStore("SigmaObs2") or \
-        selfA._toStore("MahalanobisConsistency") or \
-        selfA._toStore("OMB"):
-        d  = Y - HXb
-    if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( d )
-    if selfA._toStore("BMA"):
-        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
-    if selfA._toStore("OMA"):
-        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
-    if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( d )
-    if selfA._toStore("SigmaObs2"):
-        TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
-    if selfA._toStore("MahalanobisConsistency"):
-        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
-    if selfA._toStore("SimulationQuantiles"):
-        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
-    if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-    if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+        if __CovForecast:
+            Pn = selfA._getInternalState("Pn")
     #
     return 0
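The control-variable trick used by van3dvar (the state is parameterized as x = Xb + B v, so the background term 0.5 * v^T B^T v never requires an inverse of B) can be summarised by the small self-contained sketch below. The helper name and the purely matrix operators H and R_inv are illustrative assumptions, and the gradient written here is the analytic gradient of this quadratic cost with respect to v.

    import numpy

    def van3dvar_cost_and_gradient(v, xb, B, H, R_inv, y):
        # Preconditioned 3DVAR: x = xb + B v, so Jb = 0.5 * v^T B^T v needs no inverse of B
        v  = numpy.asarray(v).reshape((-1, 1))
        x  = xb + B @ v
        d  = y - H @ x                                   # innovation at the current state
        Jb = 0.5 * float(v.T @ (B.T @ v))
        Jo = 0.5 * float(d.T @ (R_inv @ d))
        gradJ = numpy.ravel(B.T @ v) - numpy.ravel(B.T @ (H.T @ (R_inv @ d)))
        return Jb + Jo, gradJ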
 
 # ==============================================================================
 if __name__ == "__main__":
-    print('\n AUTODIAGNOSTIC\n')
+    print("\n AUTODIAGNOSTIC\n")