Updating module version information
[modules/adao.git] src/daComposant/daCore/NumericObjects.py
index 3938e4861204c97ed335414b39168c3e361995e7..aa6c683592e0c1a146b1ed356675970ae5d8defd 100644
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright (C) 2008-2020 EDF R&D
+# Copyright (C) 2008-2022 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -26,23 +26,26 @@ __doc__ = """
 __author__ = "Jean-Philippe ARGAUD"
 
 import os, time, copy, types, sys, logging
-import math, numpy, scipy
-from daCore.BasicObjects import Operator
+import math, numpy, scipy, scipy.linalg, scipy.optimize, scipy.version
+from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
 from daCore.PlatformInfo import PlatformInfo
 mpr = PlatformInfo().MachinePrecision()
 mfp = PlatformInfo().MaximumPrecision()
 # logging.getLogger().setLevel(logging.DEBUG)
 
 # ==============================================================================
-def ExecuteFunction( paire ):
-    assert len(paire) == 2, "Incorrect number of arguments"
-    X, funcrepr = paire
-    __X = numpy.asmatrix(numpy.ravel( X )).T
+def ExecuteFunction( triplet ):
+    assert len(triplet) == 3, "Incorrect number of arguments"
+    X, xArgs, funcrepr = triplet
+    __X = numpy.ravel( X ).reshape((-1,1))
     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
     __fonction = getattr(__module,funcrepr["__userFunction__name"])
     sys.path = __sys_path_tmp ; del __sys_path_tmp
-    __HX  = __fonction( __X )
+    if isinstance(xArgs, dict):
+        __HX  = __fonction( __X, **xArgs )
+    else:
+        __HX  = __fonction( __X )
     return numpy.ravel( __HX )
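
For illustration, a minimal sketch of the (X, xArgs, funcrepr) triplet this worker consumes; the module and function names below are hypothetical placeholders:

    import numpy, os, tempfile
    tmpdir = tempfile.mkdtemp()
    with open(os.path.join(tmpdir, "usermodel.py"), "w") as f:
        f.write("def H(X, scale=1.0):\n    return scale * X**2\n")
    funcrepr = {
        "__userFunction__path": tmpdir,       # directory inserted into sys.path
        "__userFunction__modl": "usermodel",  # module name without extension
        "__userFunction__name": "H",          # callable looked up in the module
    }
    triplet = (numpy.array([1., 2., 3.]), {"scale": 2.0}, funcrepr)
    # ExecuteFunction(triplet) imports usermodel, calls H(X, scale=2.0) on the
    # column-reshaped X, and returns the flattened result: [2., 8., 18.]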
 
 # ==============================================================================
@@ -62,6 +65,8 @@ class FDApproximation(object):
             centeredDF            = False,
             increment             = 0.01,
             dX                    = None,
+            extraArguments        = None,
+            reducingMemoryUse     = False,
             avoidingRedundancy    = True,
             toleranceInRedundancy = 1.e-18,
             lenghtOfRedundancy    = -1,
@@ -70,6 +75,8 @@ class FDApproximation(object):
             mfEnabled             = False,
             ):
         self.__name = str(name)
+        self.__extraArgs = extraArguments
+        #
         if mpEnabled:
             try:
                 import multiprocessing
@@ -83,12 +90,12 @@ class FDApproximation(object):
             self.__mpWorkers = None
         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
         #
-        if mfEnabled:
-            self.__mfEnabled = True
-        else:
-            self.__mfEnabled = False
+        self.__mfEnabled = bool(mfEnabled)
         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
         #
+        self.__rmEnabled = bool(reducingMemoryUse)
+        logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
+        #
         if avoidingRedundancy:
             self.__avoidRC = True
             self.__tolerBP = float(toleranceInRedundancy)
@@ -100,6 +107,9 @@ class FDApproximation(object):
             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
         else:
             self.__avoidRC = False
+        logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
+        if self.__avoidRC:
+            logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
         #
         if self.__mpEnabled:
             if isinstance(Function,types.FunctionType):
@@ -114,7 +124,7 @@ class FDApproximation(object):
                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                 self.__userFunction__path = os.path.dirname(mod)
                 del mod
-                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
             elif isinstance(Function,types.MethodType):
                 logging.debug("FDA Calculs en multiprocessing : MethodType")
@@ -128,12 +138,12 @@ class FDApproximation(object):
                 self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','')
                 self.__userFunction__path = os.path.dirname(mod)
                 del mod
-                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+                self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
                 self.__userFunction = self.__userOperator.appliedTo # For the direct computation
             else:
                 raise TypeError("User defined function or method has to be provided for finite differences approximation.")
         else:
-            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled )
+            self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs )
             self.__userFunction = self.__userOperator.appliedTo
         #
         self.__centeredDF = bool(centeredDF)
@@ -144,10 +154,7 @@ class FDApproximation(object):
         if dX is None:
             self.__dX     = None
         else:
-            self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
-        logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
-        if self.__avoidRC:
-            logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
+            self.__dX     = numpy.ravel( dX )
 
     # ---------------------------------------------------------
     def __doublon__(self, e, l, n, v=None):
@@ -160,25 +167,50 @@ class FDApproximation(object):
         return __ac, __iac
 
     # ---------------------------------------------------------
-    def DirectOperator(self, X ):
+    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
+        "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
+        if not isinstance(__LMatrix, (list,tuple)):
+            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
+        if __dotWith is not None:
+            __Idwx = numpy.ravel( __dotWith )
+            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
+            __Produit = numpy.zeros(__LMatrix[0].size)
+            for i, col in enumerate(__LMatrix):
+                __Produit += float(__Idwx[i]) * col
+            return __Produit
+        elif __dotTWith is not None:
+            _Idwy = numpy.ravel( __dotTWith ).T
+            assert __LMatrix[0].size == _Idwy.size, "Incorrect size of elements"
+            __Produit = numpy.zeros(len(__LMatrix))
+            for i, col in enumerate(__LMatrix):
+                __Produit[i] = float( _Idwy @ col)
+            return __Produit
+        else:
+            __Produit = None
+        return __Produit
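
A small self-contained check of what __listdotwith__ computes: the columnwise-list products coincide with the products by the stacked matrix (a sketch, independent of this class):

    import numpy
    cols = [numpy.array([1., 0., 2.]), numpy.array([0., 3., 1.])]  # J as a list of columns
    J = numpy.vstack(cols).T                                       # dense equivalent, shape (3, 2)
    x, y = numpy.array([2., 5.]), numpy.array([1., 1., 1.])
    Jx  = sum(float(x[i]) * c for i, c in enumerate(cols))         # __dotWith branch
    Jty = numpy.array([float(y @ c) for c in cols])                # __dotTWith branch
    assert numpy.allclose(Jx, J @ x) and numpy.allclose(Jty, J.T @ y)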
+
+    # ---------------------------------------------------------
+    def DirectOperator(self, X, **extraArgs ):
         """
         Direct computation using the provided function.
+
+        NB: the extraArgs are present to ensure call compatibility, but they
+        must not be passed here to the user function.
         """
         logging.debug("FDA DirectOperator computation (explicit)")
         if self.__mfEnabled:
             _HX = self.__userFunction( X, argsAsSerie = True )
         else:
-            _X = numpy.asmatrix(numpy.ravel( X )).T
-            _HX = numpy.ravel(self.__userFunction( _X ))
+            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
         #
         return _HX
 
     # ---------------------------------------------------------
-    def TangentMatrix(self, X ):
+    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
         """
         Computation of the tangent operator as the Jacobian by finite
         differences, that is the gradient of H at X. One uses directional finite
-        differences around the point X. X is a numpy.matrix.
+        differences around the point X. X is a numpy.ndarray.
 
         Centred finite differences (order 2 approximation):
         1/ For each component i of X, one adds and removes the perturbation
@@ -206,12 +238,14 @@ class FDApproximation(object):
         if X is None or len(X)==0:
             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
         #
-        _X = numpy.asmatrix(numpy.ravel( X )).T
+        _X = numpy.ravel( X )
         #
         if self.__dX is None:
             _dX  = self.__increment * _X
         else:
-            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
+            _dX = numpy.ravel( self.__dX )
+        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
         #
         if (_dX == 0.).any():
             moyenne = _dX.mean()
@@ -226,11 +260,16 @@ class FDApproximation(object):
             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                 __alreadyCalculated, __i = True, __alreadyCalculatedP
-                logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
+                logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
         #
         if __alreadyCalculated:
             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
             _Jacobienne = self.__listJPCR[__i]
+            logging.debug("FDA Fin du calcul de la Jacobienne")
+            if dotWith is not None:
+                return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
+            elif dotTWith is not None:
+                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
         else:
             logging.debug("FDA   Calcul Jacobienne (explicite)")
             if self.__centeredDF:
@@ -244,13 +283,13 @@ class FDApproximation(object):
                     _jobs = []
                     for i in range( len(_dX) ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
-                        _jobs.append( (_X_plus_dXi,  funcrepr) )
-                        _jobs.append( (_X_moins_dXi, funcrepr) )
+                        _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
+                        _jobs.append( (_X_moins_dXi, self.__extraArgs, funcrepr) )
                     #
                     import multiprocessing
                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
@@ -266,9 +305,9 @@ class FDApproximation(object):
                     _xserie = []
                     for i in range( len(_dX) ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
                         _xserie.append( _X_plus_dXi )
@@ -284,9 +323,9 @@ class FDApproximation(object):
                     _Jacobienne  = []
                     for i in range( _dX.size ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
@@ -303,12 +342,12 @@ class FDApproximation(object):
                         "__userFunction__name" : self.__userFunction__name,
                     }
                     _jobs = []
-                    _jobs.append( (_X.A1, funcrepr) )
+                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                     for i in range( len(_dX) ):
-                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i] = _X[i] + _dX[i]
                         #
-                        _jobs.append( (_X_plus_dXi, funcrepr) )
+                        _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
                     #
                     import multiprocessing
                     self.__pool = multiprocessing.Pool(self.__mpWorkers)
@@ -324,9 +363,9 @@ class FDApproximation(object):
                     #
                 elif self.__mfEnabled:
                     _xserie = []
-                    _xserie.append( _X.A1 )
+                    _xserie.append( _X )
                     for i in range( len(_dX) ):
-                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i] = _X[i] + _dX[i]
                         #
                         _xserie.append( _X_plus_dXi )
@@ -344,572 +383,4054 @@ class FDApproximation(object):
                     _HX = self.DirectOperator( _X )
                     for i in range( _dX.size ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
                         #
                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                         #
                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
-                #
             #
-            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
-            if self.__avoidRC:
-                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
-                while len(self.__listJPCP) > self.__lenghtRJ:
-                    self.__listJPCP.pop(0)
-                    self.__listJPCI.pop(0)
-                    self.__listJPCR.pop(0)
-                    self.__listJPPN.pop(0)
-                    self.__listJPIN.pop(0)
-                self.__listJPCP.append( copy.copy(_X) )
-                self.__listJPCI.append( copy.copy(_dX) )
-                self.__listJPCR.append( copy.copy(_Jacobienne) )
-                self.__listJPPN.append( numpy.linalg.norm(_X) )
-                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
-        #
-        logging.debug("FDA Fin du calcul de la Jacobienne")
+            if (dotWith is not None) or (dotTWith is not None):
+                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
+            else:
+                __Produit = None
+            if __Produit is None or self.__avoidRC:
+                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
+                if self.__avoidRC:
+                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
+                    while len(self.__listJPCP) > self.__lenghtRJ:
+                        self.__listJPCP.pop(0)
+                        self.__listJPCI.pop(0)
+                        self.__listJPCR.pop(0)
+                        self.__listJPPN.pop(0)
+                        self.__listJPIN.pop(0)
+                    self.__listJPCP.append( copy.copy(_X) )
+                    self.__listJPCI.append( copy.copy(_dX) )
+                    self.__listJPCR.append( copy.copy(_Jacobienne) )
+                    self.__listJPPN.append( numpy.linalg.norm(_X) )
+                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
+            logging.debug("FDA Fin du calcul de la Jacobienne")
+            if __Produit is not None:
+                return __Produit
         #
         return _Jacobienne
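
As a toy illustration of the directional finite differences described in the docstring (a standalone sketch; H, X and dX are made up here):

    import numpy
    H = lambda X: numpy.array([X[0]**2, X[0]*X[1]])   # exact Jacobian [[2*x0, 0], [x1, x0]]
    X, dX = numpy.array([1., 2.]), 1.e-6 * numpy.ones(2)
    cols = []
    for i in range(X.size):
        Xp, Xm = X.copy(), X.copy()
        Xp[i] += dX[i]; Xm[i] -= dX[i]
        cols.append((H(Xp) - H(Xm)) / (2. * dX[i]))   # centred differences, order 2
    J = numpy.vstack(cols).T
    assert numpy.allclose(J, [[2., 0.], [2., 1.]], atol=1.e-5)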
 
     # ---------------------------------------------------------
-    def TangentOperator(self, paire ):
+    def TangentOperator(self, paire, **extraArgs ):
         """
         Tangent computation using the Jacobian.
+
+        NB: the extraArgs are present to ensure call compatibility, but they
+        must not be passed here to the user function.
         """
         if self.__mfEnabled:
-            assert len(paire) == 1, "Incorrect lenght of arguments"
+            assert len(paire) == 1, "Incorrect length of arguments"
             _paire = paire[0]
             assert len(_paire) == 2, "Incorrect number of arguments"
         else:
             assert len(paire) == 2, "Incorrect number of arguments"
             _paire = paire
         X, dX = _paire
-        _Jacobienne = self.TangentMatrix( X )
         if dX is None or len(dX) == 0:
             #
             # Calcul de la forme matricielle si le second argument est None
             # -------------------------------------------------------------
+            _Jacobienne = self.TangentMatrix( X )
             if self.__mfEnabled: return [_Jacobienne,]
             else:                return _Jacobienne
         else:
             #
             # Computation of the linearised value of H at X applied to dX
             # ------------------------------------------------------
-            _dX = numpy.asmatrix(numpy.ravel( dX )).T
-            _HtX = numpy.dot(_Jacobienne, _dX)
-            if self.__mfEnabled: return [_HtX.A1,]
-            else:                return _HtX.A1
+            _HtX = self.TangentMatrix( X, dotWith = dX )
+            if self.__mfEnabled: return [_HtX,]
+            else:                return _HtX
 
     # ---------------------------------------------------------
-    def AdjointOperator(self, paire ):
+    def AdjointOperator(self, paire, **extraArgs ):
         """
         Adjoint computation using the Jacobian.
+
+        NB: the extraArgs are present to ensure call compatibility, but they
+        must not be passed here to the user function.
         """
         if self.__mfEnabled:
-            assert len(paire) == 1, "Incorrect lenght of arguments"
+            assert len(paire) == 1, "Incorrect length of arguments"
             _paire = paire[0]
             assert len(_paire) == 2, "Incorrect number of arguments"
         else:
             assert len(paire) == 2, "Incorrect number of arguments"
             _paire = paire
         X, Y = _paire
-        _JacobienneT = self.TangentMatrix( X ).T
         if Y is None or len(Y) == 0:
             #
             # Computation of the matrix form if the second argument is None
             # -------------------------------------------------------------
+            _JacobienneT = self.TangentMatrix( X ).T
             if self.__mfEnabled: return [_JacobienneT,]
             else:                return _JacobienneT
         else:
             #
             # Computation of the adjoint value at X applied to Y
             # --------------------------------------------------
-            _Y = numpy.asmatrix(numpy.ravel( Y )).T
-            _HaY = numpy.dot(_JacobienneT, _Y)
-            if self.__mfEnabled: return [_HaY.A1,]
-            else:                return _HaY.A1
+            _HaY = self.TangentMatrix( X, dotTWith = Y )
+            if self.__mfEnabled: return [_HaY,]
+            else:                return _HaY
 
 # ==============================================================================
-def mmqr(
-        func     = None,
-        x0       = None,
-        fprime   = None,
-        bounds   = None,
-        quantile = 0.5,
-        maxfun   = 15000,
-        toler    = 1.e-06,
-        y        = None,
+def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
+    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+    #
+    _bgcenter = numpy.ravel(_bgcenter)[:,None]
+    if _nbmembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
+    #
+    if _bgcovariance is None:
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    else:
+        _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
+    #
+    return _Perturbations
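
Usage sketch (assuming the functions of this module are importable): each of the _nbmembers columns is one perturbed state around _bgcenter:

    import numpy
    E = EnsembleOfCenteredPerturbations(numpy.array([0., 10.]), numpy.diag([1., 4.]), 5)
    assert E.shape == (2, 5)   # one column per member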
+
+# ==============================================================================
+def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
+    "Génération d'un ensemble de taille _nbmembers-1 d'états aléatoires centrés"
+    def __CenteredRandomAnomalies(Zr, N):
+        """
+        Generates a matrix of N random anomalies centred on Zr, following the
+        handwritten notes of MB and consistent with the code of PS with eps = -1
+        """
+        eps = -1
+        Q = numpy.identity(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
+        Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
+        R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
+        Q = numpy.dot(Q,R)
+        Zr = numpy.dot(Q,Zr)
+        return Zr.T
+    #
+    _bgcenter = numpy.ravel(_bgcenter).reshape((-1,1))
+    if _nbmembers < 1:
+        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
+    if _bgcovariance is None:
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    else:
+        if _withSVD:
+            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+            _nbctl = _bgcenter.size
+            if _nbmembers > _nbctl:
+                _Z = numpy.concatenate((numpy.dot(
+                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
+                    numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
+            else:
+                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
+            _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
+            _Perturbations = _bgcenter + _Zca
+        else:
+            if max(abs(_bgcovariance.flatten())) > 0:
+                _nbctl = _bgcenter.size
+                _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
+                _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
+                _Perturbations = _bgcenter + _Zca
+            else:
+                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
+    #
+    return _Perturbations
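
By construction the column sums of Q vanish, so the anomalies cancel out over the members and the ensemble mean is exactly _bgcenter; a quick check (sketch):

    import numpy
    E = EnsembleOfBackgroundPerturbations(numpy.zeros(3), numpy.eye(3), 10)
    assert numpy.allclose(E.mean(axis=1), 0., atol=1.e-10)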
+
+# ==============================================================================
+def EnsembleMean( __Ensemble ):
+    "Renvoie la moyenne empirique d'un ensemble"
+    return numpy.asarray(__Ensemble).mean(axis=1, dtype=mfp).astype('float').reshape((-1,1))
+
+# ==============================================================================
+def EnsembleOfAnomalies( __Ensemble, __OptMean = None, __Normalisation = 1.):
+    "Renvoie les anomalies centrées à partir d'un ensemble"
+    if __OptMean is None:
+        __Em = EnsembleMean( __Ensemble )
+    else:
+        __Em = numpy.ravel( __OptMean ).reshape((-1,1))
+    #
+    return __Normalisation * (numpy.asarray( __Ensemble ) - __Em)
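
The anomalies are the member-wise deviations from the ensemble mean, so they sum to zero over the members (sketch):

    import numpy
    E = numpy.arange(6.).reshape((2, 3))       # 2 variables, 3 members
    A = EnsembleOfAnomalies(E)
    assert numpy.allclose(A.sum(axis=1), 0.)   # centred by construction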
+
+# ==============================================================================
+def EnsembleErrorCovariance( __Ensemble, __quick = False ):
+    "Renvoie l'estimation empirique de la covariance d'ensemble"
+    if __quick:
+        # Covariance rapide mais rarement définie positive
+        __Covariance = numpy.cov( __Ensemble )
+    else:
+        # Résultat souvent identique à numpy.cov, mais plus robuste
+        __n, __m = numpy.asarray( __Ensemble ).shape
+        __Anomalies = EnsembleOfAnomalies( __Ensemble )
+        # Estimation empirique
+        __Covariance = ( __Anomalies @ __Anomalies.T ) / (__m-1)
+        # Assure la symétrie
+        __Covariance = ( __Covariance + __Covariance.T ) * 0.5
+        # Assure la positivité
+        __epsilon    = mpr*numpy.trace( __Covariance )
+        __Covariance = __Covariance + __epsilon * numpy.identity(__n)
+    #
+    return __Covariance
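
The robust branch reproduces numpy.cov up to the symmetrisation and the small diagonal shift added for positivity; the underlying identity (sketch):

    import numpy
    E = numpy.random.randn(3, 20)              # 3 variables, 20 members
    A = E - E.mean(axis=1, keepdims=True)
    assert numpy.allclose((A @ A.T) / (20 - 1), numpy.cov(E))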
+
+# ==============================================================================
+def EnsemblePerturbationWithGivenCovariance( __Ensemble, __Covariance, __Seed=None ):
+    "Ajout d'une perturbation à chaque membre d'un ensemble selon une covariance prescrite"
+    if hasattr(__Covariance,"assparsematrix"):
+        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance.assparsematrix())/abs(__Ensemble).mean() < mpr).all():
+            # Handling of a null or almost null covariance
+            return __Ensemble
+        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance.assparsematrix()) < mpr).all():
+            # Handling of a null or almost null covariance
+            return __Ensemble
+    else:
+        if (abs(__Ensemble).mean() > mpr) and (abs(__Covariance)/abs(__Ensemble).mean() < mpr).all():
+            # Handling of a null or almost null covariance
+            return __Ensemble
+        if (abs(__Ensemble).mean() <= mpr) and (abs(__Covariance) < mpr).all():
+            # Handling of a null or almost null covariance
+            return __Ensemble
+    #
+    __n, __m = __Ensemble.shape
+    if __Seed is not None: numpy.random.seed(__Seed)
+    #
+    if hasattr(__Covariance,"isscalar") and __Covariance.isscalar():
+        # Handling of a covariance proportional to the identity
+        __zero = 0.
+        __std  = numpy.sqrt(__Covariance.assparsematrix())
+        __Ensemble += numpy.random.normal(__zero, __std, size=(__m,__n)).T
+    #
+    elif hasattr(__Covariance,"isvector") and __Covariance.isvector():
+        # Handling of a diagonal covariance with non-identical variances
+        __zero = numpy.zeros(__n)
+        __std  = numpy.sqrt(__Covariance.assparsematrix())
+        __Ensemble += numpy.asarray([numpy.random.normal(__zero, __std) for i in range(__m)]).T
+    #
+    elif hasattr(__Covariance,"ismatrix") and __Covariance.ismatrix():
+        # Handling of a full covariance
+        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance.asfullmatrix(__n), size=__m).T
+    #
+    elif isinstance(__Covariance, numpy.ndarray):
+        # Handling of a full numpy covariance, knowing that this case is reached last
+        __Ensemble += numpy.random.multivariate_normal(numpy.zeros(__n), __Covariance, size=__m).T
+    #
+    else:
+        raise ValueError("Error in ensemble perturbation with inadequate covariance specification")
+    #
+    return __Ensemble
+
+# ==============================================================================
+def CovarianceInflation(
+        InputCovOrEns,
+        InflationType   = None,
+        InflationFactor = None,
+        BackgroundCov   = None,
         ):
     """
-    Implémentation informatique de l'algorithme MMQR, basée sur la publication :
-    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
-    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
+    Inflation applicable either to Pb or Pa, or to the ensembles EXb or EXa
+
+    Synthesis: Hunt 2007, Section 2.3.5
     """
+    if InflationFactor is None:
+        return InputCovOrEns
+    else:
+        InflationFactor = float(InflationFactor)
     #
-    # Recuperation des donnees et informations initiales
-    # --------------------------------------------------
-    variables = numpy.ravel( x0 )
-    mesures   = numpy.ravel( y )
-    increment = sys.float_info[0]
-    p         = variables.size
-    n         = mesures.size
-    quantile  = float(quantile)
+    if InflationType in ["MultiplicativeOnAnalysisCovariance", "MultiplicativeOnBackgroundCovariance"]:
+        if InflationFactor < 1.:
+            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
+        if InflationFactor < 1.+mpr:
+            return InputCovOrEns
+        OutputCovOrEns = InflationFactor**2 * InputCovOrEns
     #
-    # Calcul des parametres du MM
-    # ---------------------------
-    tn      = float(toler) / n
-    e0      = -tn / math.log(tn)
-    epsilon = (e0-tn)/(1+math.log(e0))
+    elif InflationType in ["MultiplicativeOnAnalysisAnomalies", "MultiplicativeOnBackgroundAnomalies"]:
+        if InflationFactor < 1.:
+            raise ValueError("Inflation factor for multiplicative inflation has to be greater or equal than 1.")
+        if InflationFactor < 1.+mpr:
+            return InputCovOrEns
+        InputCovOrEnsMean = InputCovOrEns.mean(axis=1, dtype=mfp).astype('float')
+        OutputCovOrEns = InputCovOrEnsMean[:,numpy.newaxis] \
+            + InflationFactor * (InputCovOrEns - InputCovOrEnsMean[:,numpy.newaxis])
     #
-    # Calculs d'initialisation
-    # ------------------------
-    residus  = mesures - numpy.ravel( func( variables ) )
-    poids    = 1./(epsilon+numpy.abs(residus))
-    veps     = 1. - 2. * quantile - residus * poids
-    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
-    iteration = 0
+    elif InflationType in ["AdditiveOnAnalysisCovariance", "AdditiveOnBackgroundCovariance"]:
+        if InflationFactor < 0.:
+            raise ValueError("Inflation factor for additive inflation has to be greater or equal than 0.")
+        if InflationFactor < mpr:
+            return InputCovOrEns
+        __n, __m = numpy.asarray(InputCovOrEns).shape
+        if __n != __m:
+            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
+        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * numpy.identity(__n)
     #
-    # Recherche iterative
-    # -------------------
-    while (increment > toler) and (iteration < maxfun) :
-        iteration += 1
+    elif InflationType == "HybridOnBackgroundCovariance":
+        if InflationFactor < 0.:
+            raise ValueError("Inflation factor for hybrid inflation has to be greater or equal than 0.")
+        if InflationFactor < mpr:
+            return InputCovOrEns
+        __n, __m = numpy.asarray(InputCovOrEns).shape
+        if __n != __m:
+            raise ValueError("Additive inflation can only be applied to squared (covariance) matrix.")
+        if BackgroundCov is None:
+            raise ValueError("Background covariance matrix B has to be given for hybrid inflation.")
+        if InputCovOrEns.shape != BackgroundCov.shape:
+            raise ValueError("Ensemble covariance matrix has to be of same size than background covariance matrix B.")
+        OutputCovOrEns = (1. - InflationFactor) * InputCovOrEns + InflationFactor * BackgroundCov
+    #
+    elif InflationType == "Relaxation":
+        raise NotImplementedError("InflationType Relaxation")
+    #
+    else:
+        raise ValueError("Error in inflation type, '%s' is not a valid keyword."%InflationType)
+    #
+    return OutputCovOrEns
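
For instance, multiplicative inflation on anomalies rescales the spread around the ensemble mean without moving the mean itself (usage sketch, assuming this module is importable):

    import numpy
    E  = numpy.random.randn(4, 8)
    Em = E.mean(axis=1, keepdims=True)
    Ei = CovarianceInflation(E, "MultiplicativeOnBackgroundAnomalies", 1.5)
    assert numpy.allclose(Ei.mean(axis=1, keepdims=True), Em)
    assert numpy.allclose(Ei - Em, 1.5 * (E - Em))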
+
+# ==============================================================================
+def HessienneEstimation(nb, HaM, HtM, BI, RI):
+    "Estimation de la Hessienne"
+    #
+    HessienneI = []
+    for i in range(int(nb)):
+        _ee    = numpy.zeros((nb,1))
+        _ee[i] = 1.
+        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
+        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
+    #
+    A = numpy.linalg.inv(numpy.array( HessienneI ))
+    #
+    if min(A.shape) != max(A.shape):
+        raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
+    if (numpy.diag(A) < 0).any():
+        raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
+    if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+        try:
+            L = numpy.linalg.cholesky( A )
+        except:
+            raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+    #
+    return A
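
With a linear observation operator this returns A = (B^-1 + H^T R^-1 H)^-1; a tiny identity-case check (sketch; numpy.matrix operands are used so that * acts as a matrix product, as the code above expects):

    import numpy
    n  = 3
    Id = numpy.matrix(numpy.eye(n))
    A  = HessienneEstimation(n, Id, Id, Id, Id)   # H = Id, B = R = Id
    assert numpy.allclose(A, 0.5 * numpy.eye(n))  # (I + I)^-1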
+
+# ==============================================================================
+def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
+    "Estimation des quantiles a posteriori (selfA est modifié)"
+    nbsamples = selfA._parameters["NumberOfSamplesForQuantiles"]
+    #
+    # Handling of bounds
+    if "StateBoundsForQuantiles" in selfA._parameters:
+        LBounds = selfA._parameters["StateBoundsForQuantiles"] # Takes precedence
+    elif "Bounds" in selfA._parameters:
+        LBounds = selfA._parameters["Bounds"]  # Reasonable default
+    else:
+        LBounds = None
+    if LBounds is not None:
+        LBounds = ForceNumericBounds( LBounds )
+    _Xa = numpy.ravel(Xa)
+    #
+    # State sampling
+    YfQ  = None
+    EXr  = None
+    for i in range(nbsamples):
+        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
+            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
+            if LBounds is not None: # "EstimateProjection" by default
+                dXr = numpy.max(numpy.hstack((dXr, LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
+                dXr = numpy.min(numpy.hstack((dXr, LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
+            dYr = HtM @ dXr
+            Yr = HXa.reshape((-1,1)) + dYr
+            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
+        elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
+            Xr = numpy.random.multivariate_normal(_Xa,A)
+            if LBounds is not None: # "EstimateProjection" by default
+                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
+                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
+            Yr = numpy.asarray(Hm( Xr ))
+        else:
+            raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
         #
-        Derivees  = numpy.array(fprime(variables))
-        Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
-        DeriveesT = Derivees.transpose()
-        M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
-        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
-        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
+        if YfQ is None:
+            YfQ = Yr.reshape((-1,1))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
+        else:
+            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
+    #
+    # Quantile extraction
+    YfQ.sort(axis=-1)
+    YQ = None
+    for quantile in selfA._parameters["Quantiles"]:
+        if not (0. <= float(quantile) <= 1.): continue
+        indice = int(nbsamples * float(quantile) - 1./nbsamples)
+        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
+        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
+    if YQ is not None: # Non-empty list of quantiles
+        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+    if selfA._toStore("SampledStateForQuantiles"):
+        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
+    #
+    return 0
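
The empirical quantile is read at index int(nbsamples*q - 1/nbsamples) of the sorted simulated values; for example (sketch):

    nbsamples, q = 100, 0.95
    indice = int(nbsamples * q - 1. / nbsamples)
    assert indice == 94   # 0-based index into the 100 sorted samples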
+
+# ==============================================================================
+def ForceNumericBounds( __Bounds ):
+    "Force les bornes à être des valeurs numériques, sauf si globalement None"
+    # Conserve une valeur par défaut à None s'il n'y a pas de bornes
+    if __Bounds is None: return None
+    # Converti toutes les bornes individuelles None à +/- l'infini
+    __Bounds = numpy.asarray( __Bounds, dtype=float )
+    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
+        raise ValueError("Incorrectly shaped bounds data")
+    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
+    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
+    return __Bounds
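
Usage sketch: each individual None bound becomes +/- the largest representable float:

    import sys
    B = ForceNumericBounds([[0., None], [None, 10.]])
    assert B[0, 1] == sys.float_info.max and B[1, 0] == -sys.float_info.max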
+
+# ==============================================================================
+def RecentredBounds( __Bounds, __Center):
+    "Recentre les bornes autour de 0, sauf si globalement None"
+    # Conserve une valeur par défaut à None s'il n'y a pas de bornes
+    if __Bounds is None: return None
+    # Recentre les valeurs numériques de bornes
+    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
+
+# ==============================================================================
+def ApplyBounds( __Vector, __Bounds, __newClip = True):
+    "Applique des bornes numériques à un point"
+    # Conserve une valeur par défaut s'il n'y a pas de bornes
+    if __Bounds is None: return __Vector
+    #
+    if not isinstance(__Vector, numpy.ndarray): # Must be an array
+        raise ValueError("Incorrect array definition of vector data")
+    if not isinstance(__Bounds, numpy.ndarray): # Must be an array
+        raise ValueError("Incorrect array definition of bounds data")
+    if 2*__Vector.size != __Bounds.size: # Must be a 2-column array of vector length
+        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
+    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
+        raise ValueError("Incorrectly shaped bounds data")
+    #
+    if __newClip:
+        __Vector = __Vector.clip(
+            __Bounds[:,0].reshape(__Vector.shape),
+            __Bounds[:,1].reshape(__Vector.shape),
+            )
+    else:
+        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
+        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
+        __Vector = numpy.asarray(__Vector)
+    #
+    return __Vector
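
Usage sketch: componentwise clipping of a state vector to its [lower, upper] bounds:

    import numpy
    x = ApplyBounds(numpy.array([-5., 0.5, 5.]),
                    numpy.array([[0., 1.], [0., 1.], [0., 1.]]))
    assert numpy.allclose(x, [0., 0.5, 1.])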
+
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+    "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+    #
+    Xf = EnsembleMean( __EnXf )
+    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
+    Pf = (1 - __Betaf) * __B + __Betaf * Pf
+    #
+    selfB = PartialAlgorithm("3DVAR")
+    selfB._parameters["Minimizer"] = "LBFGSB"
+    selfB._parameters["MaximumNumberOfSteps"] = 15000
+    selfB._parameters["CostDecrementTolerance"] = 1.e-7
+    selfB._parameters["ProjectedGradientTolerance"] = -1
+    selfB._parameters["GradientNormTolerance"] = 1.e-05
+    selfB._parameters["StoreInternalVariables"] = False
+    selfB._parameters["optiprint"] = -1
+    selfB._parameters["optdisp"] = 0
+    selfB._parameters["Bounds"] = None
+    selfB._parameters["InitializationPoint"] = Xf
+    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+    del selfB
+    #
+    return Xa + EnsembleOfAnomalies( __EnXn )
+
+# ==============================================================================
+def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Constrained Unscented Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+    #
+    L     = Xb.size
+    Alpha = selfA._parameters["Alpha"]
+    Beta  = selfA._parameters["Beta"]
+    if selfA._parameters["Kappa"] == 0:
+        if selfA._parameters["EstimationOf"] == "State":
+            Kappa = 0
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            Kappa = 3 - L
+    else:
+        Kappa = selfA._parameters["Kappa"]
+    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+    Gamma  = math.sqrt( L + Lambda )
+    #
+    Ww = []
+    Ww.append( 0. )
+    for i in range(2*L):
+        Ww.append( 1. / (2.*(L + Lambda)) )
+    #
+    Wm = numpy.array( Ww )
+    Wm[0] = Lambda / (L + Lambda)
+    Wc = numpy.array( Ww )
+    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
+    #
+    # Operators
+    Hm = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        if hasattr(B,"asfullmatrix"):
+            Pn = B.asfullmatrix(__n)
+        else:
+            Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
-        variables = variables + step
-        if bounds is not None:
-            while( (variables < numpy.ravel(numpy.asmatrix(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asmatrix(bounds)[:,1])).any() ):
-                step      = step/2.
-                variables = variables - step
-        residus   = mesures - numpy.ravel( func(variables) )
-        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
         #
-        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
-            step      = step/2.
-            variables = variables - step
-            residus   = mesures - numpy.ravel( func(variables) )
-            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
+        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+        nbSpts = 2*Xn.size+1
         #
-        increment     = lastsurrogate-surrogate
-        poids         = 1./(epsilon+numpy.abs(residus))
-        veps          = 1. - 2. * quantile - residus * poids
-        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            for point in range(nbSpts):
+                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
+        #
+        XEtnnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
+                if Cm is not None and Un is not None: # Warning: if Cm is also inside M, it is counted twice!
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEtnnpi = XEtnnpi + Cm @ Un
+                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # --- > By principle, M = Id, Q = 0
+                XEtnnpi = Xnp[:,point]
+            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
+        #
+        Xncm = ( XEtnnp * Wm ).sum(axis=1)
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+        for point in range(nbSpts):
+            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
+        #
+        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
+            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
+        else:
+            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
+        #
+        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            for point in range(nbSpts):
+                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
+        #
+        Ynnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnpi = Hm( (Xnnp[:,point], None) )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnpi = Hm( (Xnnp[:,point], Un) )
+            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+        Ynnp = numpy.concatenate( Ynnp, axis=1 )
+        #
+        Yncm = ( Ynnp * Wm ).sum(axis=1)
+        #
+        Pyyn = R
+        Pxyn = 0.
+        for point in range(nbSpts):
+            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+        #
+        _Innovation  = Ynpu - Yncm.reshape((-1,1))
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Warning: if Cm is also inside H, it is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pxyn * Pyyn.I
+        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
+        Pn = Pnm - Kn * Pyyn * Kn.T
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        Xa = Xn # Pointers
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xncm )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xncm - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
     #
-    # Mesure d'écart
-    # --------------
-    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
+    # Final additional storage of the optimum in parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
     #
-    return variables, Ecart, [n,p,iteration,increment,0]
+    return 0
 
 # ==============================================================================
+def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Constrained Extended Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
+        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
+        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
+            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
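+        # With the "EstimateProjection" constraint, the state is projected onto the bounds before the forecast, on the prediction, and again on the analysis below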
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this term is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
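+        # Analysis: Kalman gain Kn = Pf Ha (R + Ht Pf Ha)^(-1), then update of the state by the innovation and reduction of the covariance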
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        Xa = Xn # Pointers
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ---------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
 
-def _BackgroundEnsembleGeneration( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
-    "Génération d'un ensemble d'ébauche de taille _nbmembers-1"
-    # ~ numpy.random.seed(1234567)
-    if _nbmembers < 1:
-        raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
-    if _withSVD:
-        U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
-        _nbctl = len(_bgcenter)
-        if _nbmembers > _nbctl:
-            _Z = numpy.concatenate((numpy.dot(
-                numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
-                numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
-        else:
-            _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
-        _Zca = _CenteredAnomalies(_Z, _nbmembers)
-        BackgroundEnsemble = (_bgcenter + _Zca.T).T
-    else:
-        if max(abs(_bgcovariance.flatten())) > 0:
-            _nbctl = len(_bgcenter)
-            _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
-            _Zca = _CenteredAnomalies(_Z, _nbmembers)
-            BackgroundEnsemble = (_bgcenter + _Zca.T).T
-        else:
-            BackgroundEnsemble = numpy.tile([_bgcenter],(_nbmembers,1)).T
-    return BackgroundEnsemble
-
-def _CenteredAnomalies(Zr, N):
-    """
-    Génère une matrice d'anomalies centrées selon les notes manuscrites de MB
-    et conforme au code de PS avec eps = -1
-    """
-    eps = -1
-    Q = numpy.eye(N-1)-numpy.ones((N-1,N-1))/numpy.sqrt(N)/(numpy.sqrt(N)-eps)
-    Q = numpy.concatenate((Q, [eps*numpy.ones(N-1)/numpy.sqrt(N)]), axis=0)
-    R, _ = numpy.linalg.qr(numpy.random.normal(size = (N-1,N-1)))
-    Q = numpy.dot(Q,R)
-    Zr = numpy.dot(Q,Zr)
-    return Zr.T
-
-def _IEnKF_cycle_Lag_1_SDA_GN(
-        E0         = None,
-        yObs       = None,
-        RIdemi     = None,
-        Mnnpu      = None,
-        Hn         = None,
-        variant    = "IEnKF", # IEnKF or IEKF
-        iMaximum   = 15000,
-        sTolerance = mfp,
-        jTolerance = mfp,
-        epsilonE   = 1e-5,
-        nbPS       = 0,  # nbPreviousSteps
-        ):
-    # 201206
-    if logging.getLogger().level < logging.WARNING:
-        assert len(E0.shape) == 2, "Ensemble E0 is not well formed: not of shape 2!"
-        assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: not of shape 2!"
-        assert variant in ("IEnKF", "IEKF"), "Variant has to be IEnKF or IEKF"
-    #
-    nbCtl, nbMbr = E0.shape
-    nbObs = yObs.size
-    #
-    if logging.getLogger().level < logging.WARNING:
-        assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1} not of good size: not of size nbObs!"
-    #
-    yo  = yObs.reshape((nbObs,1))
-    IN  = numpy.identity(nbMbr)
-    if variant == "IEnKF":
-        T    = numpy.identity(nbMbr)
-        Tinv = numpy.identity(nbMbr)
-    x00 = numpy.mean(E0, axis = 1)
-    Ah0 = E0 - x00
-    Ap0 = numpy.linalg.pinv( Ah0.T.dot(Ah0) )
-    if logging.getLogger().level < logging.WARNING:
-        assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed, of shape 2!"
-        assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
-        assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
-    #
-    def _convergence_condition(j, dx, JCurr, JPrev):
-        if j > iMaximum:
-            logging.debug("Convergence on maximum number of iterations per cycle, that reach the limit of %i."%iMaximum)
-            return True
-        #---------
-        if j == 1:
-            _deltaOnJ = 1.
-        else:
-            _deltaOnJ = abs(JCurr - JPrev) / JPrev
-        if _deltaOnJ <= jTolerance:
-            logging.debug("Convergence on cost decrement tolerance, that is below the threshold of %.1e."%jTolerance)
-            return True
-        #---------
-        _deltaOnX = numpy.linalg.norm(dx)
-        if _deltaOnX <= sTolerance:
-            logging.debug("Convergence on norm of state correction, that is below the threshold of %.1e."%sTolerance)
-            return True # En correction de l'état
-        #---------
-        return False
-    #
-    St = dict([(k,[]) for k in [
-        "CurrentState", "CurrentEnsemble",
-        "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
-        ]])
-    #
-    j, convergence, JPrev = 1, False, numpy.nan
-    x1 = x00
-    while not convergence:
-        logging.debug("Internal IEnKS step number %i"%j)
-        St["CurrentState"].append( x1.squeeze() )
-        if variant == "IEnKF": # Transform
-            E1 = x1 + Ah0.dot(T)
-        else: # IEKF
-            E1 = x1 + epsilonE * Ah0
-        St["CurrentEnsemble"].append( E1 )
-        E2  = numpy.array([Mnnpu(_x) for _x in E1.T]).reshape((nbCtl, nbMbr)) # Evolution 1->2
-        HEL = numpy.array([Hn(_x) for _x in E2.T]).T     # Observation à 2
-        yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
-        HA2 = HEL - yLm
-        if variant == "IEnKF":
-            HA2 = HA2.dot(Tinv)
-        else:
-            HA2 = HA2 / epsilonE
-        RIdemidy = RIdemi.dot(yo - yLm)
-        xs = RIdemidy / math.sqrt(nbMbr-1)
-        ES = RIdemi.dot(HA2) / math.sqrt(nbMbr-1)
-        G  = numpy.linalg.inv(IN + ES.T.dot(ES))
-        xb = G.dot(ES.T.dot(xs))
-        dx = Ah0.dot(xb) + Ah0.dot(G.dot(Ap0.dot(Ah0.T.dot(x00 - x1))))
-        #
-        Jb = float(dx.T.dot(dx))
-        Jo = float(RIdemidy.T.dot(RIdemidy))
-        J  = Jo + Jb
-        logging.debug("Values for cost functions are: J = %.5e  Jo = %.5e  Jb = %.5e"%(J,Jo,Jb))
-        St["CostFunctionJb"].append( Jb )
-        St["CostFunctionJo"].append( Jo )
-        St["CostFunctionJ"].append( J )
-        #
-        x1 = x1 + dx
-        j = j + 1
-        convergence = _convergence_condition(j, dx, J, JPrev)
-        JPrev = J
-        #
-        if variant == "IEnKF":
-            T    = numpy.real_if_close(scipy.linalg.sqrtm(G))
-            Tinv = numpy.linalg.inv(T)
-    #
-    # Stocke le dernier pas
-    x2 = numpy.mean( E2, axis = 1)
-    if variant == "IEKF":
-        A2 = E2 - x2
-        A2 = A2.dot(numpy.linalg.cholesky(G)) / epsilonE
-        E2 = x2 + A2
-    St["CurrentState"].append( x2.squeeze() )
-    St["CurrentEnsemble"].append( E2 )
-    #
-    IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
-    xa = St["CurrentState"][IndexMin]
-    Ea = St["CurrentEnsemble"][IndexMin]
-    #
-    return (xa, Ea, St)
-
-def ienkf(
-        xb         = None,          # Background (None si E0)
-        E0         = None,          # Background ensemble (None si xb)
-        yObs       = None,          # Observation (série)
-        B          = None,          # B
-        RIdemi     = None,          # R^(-1/2)
-        Mnnpu      = None,          # Evolution operator
-        Hn         = None,          # Observation operator
-        variant    = "IEnKF",       # IEnKF or IEKF
-        nMembers   = 5,             # Number of members
-        sMaximum   = 0,             # Number of spinup steps
-        cMaximum   = 15000,         # Number of steps or cycles
-        iMaximum   = 15000,         # Number of iterations per cycle
-        sTolerance = mfp,           # State correction tolerance
-        jTolerance = mfp,           # Cost decrement tolerance
-        epsilon    = 1e-5,
-        inflation  = 1.,
-        nbPS       = 0,             # Number of previous steps
-        setSeed    = None,
-        ):
+# ==============================================================================
+def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula"):
+    """
+    Ensemble Kalman Smoother (EnKS)
+    """
     #
-    # Initial
-    if setSeed is not None: numpy.random.seed(setSeed)
-    if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
-    #
-    # Spinup
-    # ------
-    #
-    # Cycles
-    # ------
-    xa, Ea, Sa = [xb,], [E0,], [{}]
-    for step in range(cMaximum):
-        if hasattr(yObs,"store"):         Ynpu = numpy.ravel( yObs[step+1] )
-        elif type(yObs) in [list, tuple]: Ynpu = numpy.ravel( yObs[step+1] )
-        else:                             Ynpu = numpy.ravel( yObs )
-        #
-        (xa_c, Ea_c, Sa_c) = _IEnKF_cycle_Lag_1_SDA_GN(
-            E0,
-            Ynpu,
-            RIdemi,
-            Mnnpu,
-            Hn,
-            variant,
-            iMaximum,
-            sTolerance,
-            jTolerance,
-            epsilon,
-            nbPS,
-            )
-        xa.append( xa_c )
-        Ea.append( Ea_c )
-        Sa.append( Sa_c )
-        #
-        # Inflation for next cycle
-        E0 = xa_c + inflation * (Ea_c - xa_c)
-    #
-    return (xa, Ea, Sa)
-
-def _IEnKS_cycle_Lag_L_SDA_GN(
-        E0         = None,
-        yObs       = None,
-        RIdemi     = None,
-        Mnnpu      = None,
-        Hn         = None,
-        method     = "Transform",
-        iMaximum   = 15000,
-        sTolerance = mfp,
-        jTolerance = mfp,
-        Lag        = 1,
-        epsilon    = -1.,
-        nbPS       = 0,
-        ):
-    # 201407 & 201905
-    if logging.getLogger().level < logging.WARNING:
-        assert len(E0.shape) == 2, "Ensemble E0 is not well formed: not of shape 2!"
-        assert len(RIdemi.shape) == 2, "R^{-1/2} is not well formed: not of shape 2!"
-        assert method in ("Transform", "Bundle"), "Method has to be Transform or Bundle"
-    #
-    nbCtl, nbMbr = E0.shape
-    nbObs = yObs.size
-    #
-    if logging.getLogger().level < logging.WARNING:
-        assert RIdemi.shape[0] == RIdemi.shape[1] == nbObs, "R^{-1} not of good size: not of size nbObs!"
-    #
-    yo  = yObs.reshape((nbObs,1))
-    IN  = numpy.identity(nbMbr)
-    if method == "Transform":
-        T    = numpy.identity(nbMbr)
-        Tinv = numpy.identity(nbMbr)
-    x00 = numpy.mean(E0, axis = 1)
-    Ah0 = E0 - x00
-    Am0  = (1/math.sqrt(nbMbr - 1)) * Ah0
-    w   = numpy.zeros((nbMbr,1))
-    if logging.getLogger().level < logging.WARNING:
-        assert len(Ah0.shape) == 2, "Ensemble A0 is not well formed, of shape 2!"
-        assert Ah0.shape[0] == nbCtl and Ah0.shape[1] == nbMbr, "Ensemble A0 is not well shaped!"
-        assert abs(max(numpy.mean(Ah0, axis = 1))) < nbMbr*mpr, "Ensemble A0 seems not to be centered!"
-    #
-    def _convergence_condition(j, dw, JCurr, JPrev):
-        if j > iMaximum:
-            logging.debug("Convergence on maximum number of iterations per cycle, that reach the limit of %i."%iMaximum)
-            return True
-        #---------
-        if j == 1:
-            _deltaOnJ = 1.
-        else:
-            _deltaOnJ = abs(JCurr - JPrev) / JPrev
-        if _deltaOnJ <= jTolerance:
-            logging.debug("Convergence on cost decrement tolerance, that is below the threshold of %.1e."%jTolerance)
-            return True
-        #---------
-        _deltaOnW = numpy.sqrt(numpy.mean(dw.squeeze()**2))
-        if _deltaOnW <= sTolerance:
-            logging.debug("Convergence on norm of weights correction, that is below the threshold of %.1e."%sTolerance)
-            return True # En correction des poids
-        #---------
-        return False
-    #
-    St = dict([(k,[]) for k in [
-        "CurrentState", "CurrentEnsemble", "CurrentWeights",
-        "CostFunctionJb", "CostFunctionJo", "CostFunctionJ",
-        ]])
-    #
-    j, convergence, JPrev = 1, False, numpy.nan
-    while not convergence:
-        logging.debug("Internal IEnKS step number %i"%j)
-        x0 = x00 + Am0.dot( w )
-        St["CurrentState"].append( x0.squeeze() )
-        if method == "Transform":
-            E0 = x0 + Ah0.dot(T)
-        else:
-            E0 = x0 + epsilon * Am0
-        St["CurrentEnsemble"].append( E0 )
-        Ek = E0
-        yHmean = numpy.mean(E0, axis = 1)
-        for k in range(1, Lag+1):
-            Ek  = numpy.array([Mnnpu(_x) for _x in Ek.T]).reshape((nbCtl, nbMbr)) # Evolution 0->L
-            if method == "Transform":
-                yHmean = Mnnpu(yHmean)
-        HEL = numpy.array([Hn(_x) for _x in Ek.T]).T     # Observation à L
-        #
-        if method == "Transform":
-            yLm = Hn( yHmean ).reshape((nbObs,1))
-            YL = RIdemi.dot( (HEL - numpy.mean( HEL, axis = 1).reshape((nbObs,1))).dot(Tinv) ) / math.sqrt(nbMbr-1)
-        else:
-            yLm = numpy.mean( HEL, axis = 1).reshape((nbObs,1))
-            YL = RIdemi.dot(HEL - yLm) / epsilon
-        dy = RIdemi.dot(yo - yLm)
-        #
-        Jb = float(w.T.dot(w))
-        Jo = float(dy.T.dot(dy))
-        J  = Jo + Jb
-        logging.debug("Values for cost functions are: J = %.5e  Jo = %.5e  Jb = %.5e"%(J,Jo,Jb))
-        St["CurrentWeights"].append( w.squeeze() )
-        St["CostFunctionJb"].append( Jb )
-        St["CostFunctionJo"].append( Jo )
-        St["CostFunctionJ"].append( J )
-        if method == "Transform":
-            GradJ = w - YL.T.dot(dy)
-            HTild = IN + YL.T.dot(YL)
-        else:
-            GradJ = (nbMbr - 1)*w - YL.T.dot(RIdemi.dot(dy))
-            HTild = (nbMbr - 1)*IN + YL.T.dot(RIdemi.dot(YL))
-        HTild = numpy.array(HTild, dtype=float)
-        dw = numpy.linalg.solve( HTild, numpy.array(GradJ, dtype=float) )
-        w = w - dw
-        j = j + 1
-        convergence = _convergence_condition(j, dw, J, JPrev)
-        JPrev = J
-        #
-        if method == "Transform":
-            (U, s, _) = numpy.linalg.svd(HTild, full_matrices=False) # Hess = U s V
-            T    = U.dot(numpy.diag(numpy.sqrt(1./s)).dot(U.T))   # T = Hess^(-1/2)
-            Tinv = U.dot(numpy.diag(numpy.sqrt(s)).dot(U.T))      # Tinv = T^(-1)
-    #
-    # Stocke le dernier pas
-    St["CurrentState"].append( numpy.mean( Ek, axis = 1).squeeze() )
-    St["CurrentEnsemble"].append( Ek )
-    #
-    IndexMin = numpy.argmin( St["CostFunctionJ"][nbPS:] ) + nbPS
-    xa = St["CurrentState"][IndexMin]
-    Ea = St["CurrentEnsemble"][IndexMin]
-    #
-    return (xa, Ea, St)
-
-def ienks(
-        xb         = None,          # Background
-        yObs       = None,          # Observation (série)
-        E0         = None,          # Background ensemble
-        B          = None,          # B
-        RIdemi     = None,          # R^(-1/2)
-        Mnnpu      = None,          # Evolution operator
-        Hn         = None,          # Observation operator
-        method     = "Transform",   # Bundle ou Transform
-        nMembers   = 5,             # Number of members
-        cMaximum   = 15000,         # Number of steps or cycles
-        iMaximum   = 15000,         # Number of iterations per cycle
-        sTolerance = mfp,           # Weights correction tolerance
-        jTolerance = mfp,           # Cost decrement tolerance
-        Lag        = 1,             # Lenght of smoothing window
-        epsilon    = -1.,
-        inflation  = 1.,
-        nbPS       = 0,             # Number of previous steps
-        setSeed    = None,
-        ):
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
     #
-    # Initial
-    if setSeed is not None: numpy.random.seed(setSeed)
-    if E0 is None: E0 = _BackgroundEnsembleGeneration( xb, B, nMembers)
-    #
-    # Spinup
-    # ------
-    #
-    # Cycles
-    # ------
-    xa, Ea, Sa = [], [], []
-    for i in range(Lag): # Lag void results
-        xa.append([])
-        Ea.append([])
-        Sa.append([])
-    for i in range(Lag,cMaximum):
-        (xa_c, Ea_c, Sa_c) = _IEnKS_cycle_Lag_L_SDA_GN(
-            E0,
-            yObs[i-Lag:i],
-            RIdemi,
-            Mnnpu,
-            Hn,
-            method,
-            iMaximum,
-            sTolerance,
-            jTolerance,
-            Lag,
-            epsilon,
-            nbPS,
-            )
-        xa.append( xa_c )
-        Ea.append( Ea_c )
-        Sa.append( Sa_c )
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Precompute the inverse square root of R
+    RIdemi = R.sqrtmI()
+    #
+    # Observation duration and sizes
+    LagL = selfA._parameters["SmootherLagL"]
+    if (not hasattr(Y,"store")) or (not hasattr(Y,"stepnumber")):
+        raise ValueError("Fixed-lag smoother requires a series of observation")
+    if Y.stepnumber() < LagL:
+        raise ValueError("Fixed-lag smoother requires a series of observation greater then the lag L")
+    duration = Y.stepnumber()
+    __p = numpy.cumprod(Y.shape())[-1]
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+    #
+    # Initial direct computation (memorization is preferred over recomputation)
+    __seed = numpy.random.get_state()
+    selfB = copy.deepcopy(selfA)
+    selfB._parameters["StoreSupplementaryCalculations"] = ["CurrentEnsembleState"]
+    if VariantM == "EnKS16-KalmanFilterFormula":
+        etkf(selfB, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM = "KalmanFilterFormula")
+    else:
+        raise ValueError("VariantM has to be chosen in the authorized methods list.")
+    if LagL > 0:
+        EL  = selfB.StoredVariables["CurrentEnsembleState"][LagL-1]
+    else:
+        EL = EnsembleOfBackgroundPerturbations( Xb, None, __m ) # Cf. etkf
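+    # Restore the random state saved before the preliminary ETKF pass (numpy.random.set_state acts by side effect)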
+    selfA._parameters["SetSeed"] = numpy.random.set_state(__seed)
+    #
+    for step in range(LagL,duration-1):
+        #
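+        # Sliding window of the LagL+1 lagged ensemble states; the last slot is a placeholder for the new analysis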
+        sEL = selfB.StoredVariables["CurrentEnsembleState"][step+1-LagL:step+1]
+        sEL.append(None)
+        #
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
         #
-        # Inflation for next cycle
-        E0 = xa_c + inflation * (Ea_c - xa_c)
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        #--------------------------
+        if VariantM == "EnKS16-KalmanFilterFormula":
+            if selfA._parameters["EstimationOf"] == "State": # Forecast
+                EL = M( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                EL = EnsemblePerturbationWithGivenCovariance( EL, Q )
+                EZ = H( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is counted twice!
+                    Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                    EZ = EZ + Cm @ Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # ---> By principle, M = Id, Q = 0
+                EZ = H( [(EL[:,i], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+            #
+            vEm   = EL.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+            vZm   = EZ.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+            #
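+            # ETKF analysis in ensemble space: normalized observed anomalies mS, normalized innovation delta, transform mT = (I + mS^T mS)^(-1), mean-update weights vw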
+            mS    = RIdemi @ EnsembleOfAnomalies( EZ, vZm, 1./math.sqrt(__m-1) )
+            mS    = mS.reshape((-1,__m)) # For dimension 1
+            delta = RIdemi @ ( Ynpu - vZm )
+            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+            vw    = mT @ mS.T @ delta
+            #
+            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+            mU    = numpy.identity(__m)
+            wTU   = (vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU)
+            #
+            EX    = EnsembleOfAnomalies( EL, vEm, 1./math.sqrt(__m-1) )
+            EL    = vEm + EX @ wTU
+            #
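+            # Fixed-lag smoothing: the same ensemble-space update wTU is applied retroactively to the LagL stored ensembles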
+            sEL[LagL] = EL
+            for irl in range(LagL): # Smoothing of the L previous analyses
+                vEm = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+                EX = EnsembleOfAnomalies( sEL[irl], vEm, 1./math.sqrt(__m-1) )
+                sEL[irl] = vEm + EX @ wTU
+            #
+            # Keep the order-0 retrospective analysis before rotating the window
+            Xa = sEL[0].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+            if selfA._toStore("APosterioriCovariance"):
+                EXn = sEL[0]
+            #
+            for irl in range(LagL):
+                sEL[irl] = sEL[irl+1]
+            sEL[LagL] = None
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(EXn) )
+    #
+    # Store the last analyses, which remain only partially updated
+    for irl in range(LagL):
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        Xa = sEL[irl].mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    return 0
+
+# ==============================================================================
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula",
+    Hybrid=None,
+    ):
+    """
+    Ensemble-Transform Kalman Filter (ETKF)
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    elif VariantM != "KalmanFilterFormula":
+        RI = R.getI()
+    if VariantM == "KalmanFilterFormula":
+        RIdemi = R.sqrtmI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By principle, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
+        #
+        # Anomalies
+        EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
+        EaHX  = EnsembleOfAnomalies( HX_predicted, Hfm)
+        #
+        #--------------------------
+        if VariantM == "KalmanFilterFormula":
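+            # Deterministic square-root update: transform mT = (I + mS^T mS)^(-1), analysis mean from the weights vw, anomalies rotated by mT^(1/2)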
+            mS    = RIdemi * EaHX / math.sqrt(__m-1)
+            mS    = mS.reshape((-1,__m)) # For dimension 1
+            delta = RIdemi * ( Ynpu - Hfm )
+            mT    = numpy.linalg.inv( numpy.identity(__m) + mS.T @ mS )
+            vw    = mT @ mS.T @ delta
+            #
+            Tdemi = numpy.real(scipy.linalg.sqrtm(mT))
+            mU    = numpy.identity(__m)
+            #
+            EaX   = EaX / math.sqrt(__m-1)
+            Xn    = Xfm + EaX @ ( vw.reshape((__m,1)) + math.sqrt(__m-1) * Tdemi @ mU )
+        #--------------------------
+        elif VariantM == "Variational":
+            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
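+            # Ensemble-space 3DVAR on the weights w: J(w) = 0.5*(d - EaHX w)^T R^(-1) (d - EaHX w) + 0.5*(m-1)*w^T w, with d = Ynpu - HXfm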
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m-1) * w.T @ w
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = (__m-1) * w.reshape((__m,1))
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = (__m-1) * numpy.identity(__m)
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize11": # Jauge Boc2011
+            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * __m * math.log(1 + 1/__m + w.T @ w)
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = __m * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = __m * \
+                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+                / (1 + 1/__m + vw.T @ vw)**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize15": # Jauge Boc2015
+            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = (__m+1) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w)
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = (__m+1) * \
+                ( (1 + 1/__m + vw.T @ vw) * numpy.identity(__m) - 2 * vw @ vw.T ) \
+                / (1 + 1/__m + vw.T @ vw)**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw.reshape((__m,1)) + EWa)
+        #--------------------------
+        elif VariantM == "FiniteSize16": # Jauge Boc2016
+            HXfm = H((Xfm[:,None], Un)) # Possibly Hfm
+            def CostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _Jo = 0.5 * _A.T @ (RI * _A)
+                _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w / (__m-1))
+                _J  = _Jo + _Jb
+                return float(_J)
+            def GradientOfCostFunction(w):
+                _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
+                _GradJo = - EaHX.T @ (RI * _A)
+                _GradJb = ((__m+1) / (__m-1)) * w.reshape((__m,1)) / (1 + 1/__m + w.T @ w / (__m-1))
+                _GradJ  = _GradJo + _GradJb
+                return numpy.ravel(_GradJ)
+            vw = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(__m),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                disp        = False,
+                )
+            #
+            Hto = EaHX.T @ (RI * EaHX).reshape((-1,__m))
+            Htb = ((__m+1) / (__m-1)) * \
+                ( (1 + 1/__m + vw.T @ vw / (__m-1)) * numpy.identity(__m) - 2 * vw @ vw.T / (__m-1) ) \
+                / (1 + 1/__m + vw.T @ vw / (__m-1))**2
+            Hta = Hto + Htb
+            #
+            Pta = numpy.linalg.inv( Hta )
+            EWa = numpy.real(scipy.linalg.sqrtm((__m-1)*Pta)) # Imaginary part ~= 10^-18
+            #
+            Xn  = Xfm + EaX @ (vw[:,None] + EWa)
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
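+        # Optional E3DVAR hybrid step: the ensemble is recentred on a 3DVAR analysis, with betaf balancing the static and the ensemble covariances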
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ---------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Extended Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        Ht = HO["Tangent"].asMatrix(ValueForMethodForm = Xn)
+        Ht = Ht.reshape(Ynpu.size,Xn.size) # ADAO & check shape
+        Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+        Ha = Ha.reshape(Xn.size,Ynpu.size) # ADAO & check shape
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            Mt = EM["Tangent"].asMatrix(ValueForMethodForm = Xn)
+            Mt = Mt.reshape(Xn.size,Xn.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = Xn)
+            Ma = Ma.reshape(Xn.size,Xn.size) # ADAO & check shape
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, this term is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, this term is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
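+        # Analysis: Kalman gain Kn = Pf Ha (R + Ht Pf Ha)^(-1), then update of the state by the innovation and reduction of the covariance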
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        Xa = Xn # Pointers
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( H((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ---------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
+    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+    """
+    Iterative EnKF
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+    RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        #--------------------------
+        if VariantM == "IEnKF12":
+            Xfm = numpy.ravel(Xn.mean(axis=1, dtype=mfp).astype('float'))
+            EaX = EnsembleOfAnomalies( Xn ) / math.sqrt(__m-1)
+            __j = 0
+            Deltaw = 1
+            if not BnotT:
+                Ta  = numpy.identity(__m)
+            vw  = numpy.zeros(__m)
+            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+                #
+                if BnotT:
+                    E1 = vx1 + _epsilon * EaX
+                else:
+                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+                #
+                if selfA._parameters["EstimationOf"] == "State": # Forecast + Q
+                    E2 = M( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+                        argsAsSerie = True,
+                        returnSerieAsArrayMatrix = True )
+                elif selfA._parameters["EstimationOf"] == "Parameters":
+                    # --- > By principle, M = Id
+                    E2 = Xn
+                vx2 = E2.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+                vy1 = H((vx2, Un)).reshape((__p,1))
+                #
+                HE2 = H( [(E2[:,i,numpy.newaxis], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+                #
+                if BnotT:
+                    EaY = (HE2 - vy2) / _epsilon
+                else:
+                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+                #
+                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy1 )))
+                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
+                Deltaw = - numpy.linalg.solve(mH,GradJ)
+                #
+                vw = vw + Deltaw
+                #
+                if not BnotT:
+                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                #
+                __j = __j + 1
+            #
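+            # The analysis ensemble is rebuilt from the mean vx2 and the
+            # (possibly rescaled) anomalies of the last iterate ensemble E2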
+            A2 = EnsembleOfAnomalies( E2 )
+            #
+            if BnotT:
+                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                A2 = math.sqrt(__m-1) * A2 @ Ta / _epsilon
+            #
+            Xn = vx2 + A2
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( E2 )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(E2) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( E2 - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ---------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Incremental 3DVAR
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
+    Innovation = Y - HXb
+    #
+    # Outer Loop
+    # ----------
+    iOuter = 0
+    J      = 1./mpr
+    DeltaJ = 1./mpr
+    Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
+    while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
+        #
+        # Inner Loop
+        # ----------
+        Ht = HO["Tangent"].asMatrix(Xr)
+        Ht = Ht.reshape(Y.size,Xr.size) # ADAO & check shape
+        #
+        # Definition of the cost function
+        # -------------------------------
+        def CostFunction(dx):
+            _dX  = numpy.asarray(dx).reshape((-1,1))
+            if selfA._parameters["StoreInternalVariables"] or \
+                selfA._toStore("CurrentState") or \
+                selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentState"].store( Xb + _dX )
+            _HdX = (Ht @ _dX).reshape((-1,1))
+            _dInnovation = Innovation - _HdX
+            if selfA._toStore("SimulatedObservationAtCurrentState") or \
+                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HXb + _HdX )
+            if selfA._toStore("InnovationAtCurrentState"):
+                selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
+            #
+            Jb  = float( 0.5 * _dX.T @ (BI @ _dX) )
+            Jo  = float( 0.5 * _dInnovation.T @ (RI @ _dInnovation) )
+            J   = Jb + Jo
+            #
+            selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            if selfA._toStore("IndexOfOptimum") or \
+                selfA._toStore("CurrentOptimum") or \
+                selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+                selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+                selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+                selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+            return J
+        #
+        def GradientOfCostFunction(dx):
+            _dX          = numpy.ravel( dx )
+            _HdX         = (Ht @ _dX).reshape((-1,1))
+            _dInnovation = Innovation - _HdX
+            GradJb       = BI @ _dX
+            GradJo       = - Ht.T @ (RI @ _dInnovation)
+            GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+            return GradJ
+        #
+        # Minimization of the functional
+        # ------------------------------
+        nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+        #
+        if selfA._parameters["Minimizer"] == "LBFGSB":
+            if "0.19" <= scipy.version.version <= "1.1.0":
+                import lbfgsbhlt as optimiseur
+            else:
+                import scipy.optimize as optimiseur
+            Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+                func        = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+                maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+                factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+                iprint      = selfA._parameters["optiprint"],
+                )
+            nfeval = Informations['funcalls']
+            rc     = Informations['warnflag']
+        elif selfA._parameters["Minimizer"] == "TNC":
+            Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+                func        = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+                maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+                pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+                ftol        = selfA._parameters["CostDecrementTolerance"],
+                messages    = selfA._parameters["optmessages"],
+                )
+        elif selfA._parameters["Minimizer"] == "CG":
+            Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                gtol        = selfA._parameters["GradientNormTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        elif selfA._parameters["Minimizer"] == "NCG":
+            Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                avextol     = selfA._parameters["CostDecrementTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        elif selfA._parameters["Minimizer"] == "BFGS":
+            Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+                f           = CostFunction,
+                x0          = numpy.zeros(Xb.size),
+                fprime      = GradientOfCostFunction,
+                args        = (),
+                maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+                gtol        = selfA._parameters["GradientNormTolerance"],
+                disp        = selfA._parameters["optdisp"],
+                full_output = True,
+                )
+        else:
+            raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+        #
+        IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+        #
+        if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+            Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+        else:
+            Minimum = Xb + Minimum.reshape((-1,1))
+        #
+        Xr     = Minimum
+        DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
+        iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
+    #
+    Xa = Xr
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional computations and/or storage
+    # --------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        d  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( d )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( d )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+    Hybrid=None,
+    ):
+    """
+    Maximum Likelihood Ensemble Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+    RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+        #
+        #--------------------------
+        if VariantM == "MLEF13":
+            Xfm = numpy.ravel(Xn_predicted.mean(axis=1, dtype=mfp).astype('float'))
+            EaX = EnsembleOfAnomalies( Xn_predicted, Xfm, 1./math.sqrt(__m-1) )
+            Ua  = numpy.identity(__m)
+            __j = 0
+            Deltaw = 1
+            if not BnotT:
+                Ta  = numpy.identity(__m)
+            vw  = numpy.zeros(__m)
+            while numpy.linalg.norm(Deltaw) >= _e and __j <= _jmax:
+                vx1 = (Xfm + EaX @ vw).reshape((__n,1))
+                #
+                if BnotT:
+                    E1 = vx1 + _epsilon * EaX
+                else:
+                    E1 = vx1 + math.sqrt(__m-1) * EaX @ Ta
+                #
+                HE2 = H( [(E1[:,i,numpy.newaxis], Un) for i in range(__m)],
+                    argsAsSerie = True,
+                    returnSerieAsArrayMatrix = True )
+                vy2 = HE2.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+                #
+                if BnotT:
+                    EaY = (HE2 - vy2) / _epsilon
+                else:
+                    EaY = ( (HE2 - vy2) @ numpy.linalg.inv(Ta) ) / math.sqrt(__m-1)
+                #
+                GradJ = numpy.ravel(vw[:,None] - EaY.transpose() @ (RI * ( Ynpu - vy2 )))
+                mH = numpy.identity(__m) + EaY.transpose() @ (RI * EaY).reshape((-1,__m))
+                Deltaw = - numpy.linalg.solve(mH,GradJ)
+                #
+                vw = vw + Deltaw
+                #
+                if not BnotT:
+                    Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+                #
+                __j = __j + 1
+            #
+            if BnotT:
+                Ta = numpy.real(scipy.linalg.sqrtm(numpy.linalg.inv( mH )))
+            #
+            Xn = vx1 + math.sqrt(__m-1) * EaX @ Ta @ Ua
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HE2 + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HE2 )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T @ (BI @ (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Additional final storage of the optimum in parameter estimation
+    # ---------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def mmqr(
+        func     = None,
+        x0       = None,
+        fprime   = None,
+        bounds   = None,
+        quantile = 0.5,
+        maxfun   = 15000,
+        toler    = 1.e-06,
+        y        = None,
+        ):
+    """
+    Computer implementation of the MMQR algorithm, based on the publication:
+    David R. Hunter, Kenneth Lange, "Quantile Regression via an MM Algorithm",
+    Journal of Computational and Graphical Statistics, 9, 1, pp.60-77, 2000.
+    """
+    #
+    # Retrieval of the data and initial information
+    # ---------------------------------------------
+    variables = numpy.ravel( x0 )
+    mesures   = numpy.ravel( y )
+    increment = sys.float_info[0]
+    p         = variables.size
+    n         = mesures.size
+    quantile  = float(quantile)
+    #
+    # Computation of the MM parameters
+    # --------------------------------
+    tn      = float(toler) / n
+    e0      = -tn / math.log(tn)
+    epsilon = (e0-tn)/(1+math.log(e0))
+    #
+    # Initialization computations
+    # ---------------------------
+    residus  = mesures - numpy.ravel( func( variables ) )
+    poids    = 1./(epsilon+numpy.abs(residus))
+    veps     = 1. - 2. * quantile - residus * poids
+    lastsurrogate = -numpy.sum(residus*veps) - (1.-2.*quantile)*numpy.sum(residus)
+    iteration = 0
+    #
+    # Iterative search
+    # ----------------
+    while (increment > toler) and (iteration < maxfun) :
+        iteration += 1
+        #
+        Derivees  = numpy.array(fprime(variables))
+        Derivees  = Derivees.reshape(n,p) # ADAO & check shape
+        DeriveesT = Derivees.transpose()
+        M         =   numpy.dot( DeriveesT , (poids[:,numpy.newaxis]*Derivees) )
+        SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
+        step      = - numpy.linalg.lstsq( M, SM, rcond=-1 )[0]
+        #
+        variables = variables + step
+        if bounds is not None:
+            # Beware: avoid an infinite loop if an interval is too small
+            while( (variables < numpy.ravel(numpy.asarray(bounds)[:,0])).any() or (variables > numpy.ravel(numpy.asarray(bounds)[:,1])).any() ):
+                step      = step/2.
+                variables = variables - step
+        residus   = mesures - numpy.ravel( func(variables) )
+        surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+        #
+        while ( (surrogate > lastsurrogate) and ( max(list(numpy.abs(step))) > 1.e-16 ) ) :
+            step      = step/2.
+            variables = variables - step
+            residus   = mesures - numpy.ravel( func(variables) )
+            surrogate = numpy.sum(residus**2 * poids) + (4.*quantile-2.) * numpy.sum(residus)
+        #
+        increment     = lastsurrogate-surrogate
+        poids         = 1./(epsilon+numpy.abs(residus))
+        veps          = 1. - 2. * quantile - residus * poids
+        lastsurrogate = -numpy.sum(residus * veps) - (1.-2.*quantile)*numpy.sum(residus)
+    #
+    # Discrepancy measure
+    # -------------------
+    Ecart = quantile * numpy.sum(residus) - numpy.sum( residus[residus<0] )
+    #
+    return variables, Ecart, [n,p,iteration,increment,0]
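+# Illustrative call of mmqr on a hypothetical toy problem, fitting the
+# constant that is the median (quantile=0.5) of three measurements:
+#   variables, Ecart, infos = mmqr(
+#       func     = lambda x: numpy.full(3, numpy.ravel(x)[0]),
+#       x0       = numpy.zeros(1),
+#       fprime   = lambda x: numpy.ones((3,1)),
+#       y        = numpy.array([1., 2., 4.]),
+#       quantile = 0.5,
+#       )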
+
+# ==============================================================================
+def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
+    """
+    Multi-step and multi-method 3DVAR
+    """
+    #
+    # Initialisation
+    # --------------
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+        if CM is not None and "Tangent" in CM and U is not None:
+            Cm = CM["Tangent"].asMatrix(Xb)
+        else:
+            Cm = None
+        #
+        if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+            Xn = numpy.ravel(Xb).reshape((-1,1))
+            selfA.StoredVariables["Analysis"].store( Xn )
+            if selfA._toStore("APosterioriCovariance"):
+                if hasattr(B,"asfullmatrix"):
+                    selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(Xn.size) )
+                else:
+                    selfA.StoredVariables["APosterioriCovariance"].store( B )
+            if selfA._toStore("ForecastState"):
+                selfA.StoredVariables["ForecastState"].store( Xn )
+        elif selfA._parameters["nextStep"]:
+            Xn = selfA._getInternalState("Xn")
+    else:
+        Xn = numpy.ravel(Xb).reshape((-1,1))
+    #
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+    else:
+        duration = 2
+    #
+    # Multi-step
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((-1,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast
+            Xn_predicted = M( (Xn, Un) )
+            if selfA._toStore("ForecastState"):
+                selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+            if Cm is not None and Un is not None: # Beware: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
+            # --- > By principle, M = Id, Q = 0
+            Xn_predicted = Xn
+        Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
+        #
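+        # One-step analysis performed by the chosen single-step method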
+        oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
+        #
+        Xn = selfA.StoredVariables["Analysis"][-1]
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+    #
+    return 0
+
+# ==============================================================================
+def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    3DVAR PSAS
+    """
+    #
+    # Initialisations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = numpy.ravel( HXb ).reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
+    Ht = HO["Tangent"].asMatrix(Xb)
+    BHT = B * Ht.T
+    HBHTpR = R + Ht * BHT
+    Innovation = Y - HXb
+    #
+    Xini = numpy.zeros(Y.size)
+    #
+    # Definition of the cost function
+    # -------------------------------
+    def CostFunction(w):
+        _W = numpy.asarray(w).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
+        #
+        Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
+        Jo  = float( - _W.T @ Innovation )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(w):
+        _W = numpy.asarray(w).reshape((-1,1))
+        GradJb  = HBHTpR @ _W
+        GradJo  = - Innovation
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the functional
+    # ------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        nfeval = Informations['funcalls']
+        rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Workaround for a TNC bug on the returned Minimum
+    # ------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    else:
+        Minimum = Xb + BHT @ Minimum.reshape((-1,1))
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        BI = B.getI()
+        RI = R.getI()
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional computations and/or storage
+    # --------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        d  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( d )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( d )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula16",
+    Hybrid=None,
+    ):
+    """
+    Stochastic EnKF
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    H = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        M = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
+    #
+    if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
+    else:                         Rn = R
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+    #
+    for step in range(duration-1):
+        numpy.random.set_state(selfA._getInternalState("seed"))
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnBackgroundAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            EMX = M( [(Xn[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By design, M = Id, Q = 0
+            Xn_predicted = EMX = Xn
+            HX_predicted = H( [(Xn_predicted[:,i], Un) for i in range(__m)],
+                argsAsSerie = True,
+                returnSerieAsArrayMatrix = True )
+        #
+        # Mean of forecast and observation of forecast
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
+        #
+        #--------------------------
+        if VariantM == "KalmanFilterFormula05":
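+            # Covariance-form update (sketch of what follows): the gain
+            #   K = Pf H^T (R + H Pf H^T)^(-1)
+            # is built from the forecast anomalies Exf, Eyf, then each member i
+            # is moved towards its own perturbed observation y + ri, ri ~ N(0,R).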
+            PfHT, HPfHT = 0., 0.
+            for i in range(__m):
+                Exfi = Xn_predicted[:,i].reshape((__n,1)) - Xfm
+                Eyfi = HX_predicted[:,i].reshape((__p,1)) - Hfm
+                PfHT  += Exfi * Eyfi.T
+                HPfHT += Eyfi * Eyfi.T
+            PfHT  = (1./(__m-1)) * PfHT
+            HPfHT = (1./(__m-1)) * HPfHT
+            Kn     = PfHT * ( R + HPfHT ).I
+            del PfHT, HPfHT
+            #
+            for i in range(__m):
+                ri = numpy.random.multivariate_normal(numpy.zeros(__p), Rn)
+                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(Ynpu) + ri - HX_predicted[:,i])
+        #--------------------------
+        elif VariantM == "KalmanFilterFormula16":
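+            # Anomaly-form update (sketch of what follows): with normalized state
+            # anomalies EaX and observation anomalies EaY (the latter including
+            # the drawn perturbations EpY), the gain is
+            #   K = EaX EaY^T (EaY EaY^T)^(-1)
+            # and each member i is updated towards its perturbed observation
+            # EpY[:,i].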
+            EpY   = EnsembleOfCenteredPerturbations(Ynpu, Rn, __m)
+            EpYm  = EpY.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+            #
+            EaX   = EnsembleOfAnomalies( Xn_predicted ) / math.sqrt(__m-1)
+            EaY   = (HX_predicted - Hfm - EpY + EpYm) / math.sqrt(__m-1)
+            #
+            Kn = EaX @ EaY.T @ numpy.linalg.inv( EaY @ EaY.T )
+            #
+            for i in range(__m):
+                Xn[:,i] = numpy.ravel(Xn_predicted[:,i]) + Kn @ (numpy.ravel(EpY[:,i]) - HX_predicted[:,i])
+        #--------------------------
+        else:
+            raise ValueError("VariantM has to be chosen in the authorized methods list.")
+        #
+        if selfA._parameters["InflationType"] == "MultiplicativeOnAnalysisAnomalies":
+            Xn = CovarianceInflation( Xn,
+                selfA._parameters["InflationType"],
+                selfA._parameters["InflationFactor"],
+                )
+        #
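+        # Hybrid E3DVAR step: presumably the analysis ensemble is recentred on
+        # a 3DVAR analysis, the ensemble and static covariance parts being
+        # weighted by the equilibrium factor betaf (see the helper
+        # Apply3DVarRecentringOnEnsemble).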
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("seed", numpy.random.get_state())
+        #--------------------------
+        #
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("APosterioriCovariance") \
+            or selfA._toStore("InnovationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
+            _Innovation = Ynpu - _HXa
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( EMX )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( EnsembleErrorCovariance(EMX) )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( EMX - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
+    #
+    # Final additional storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    3DVAR
+    """
+    #
+    # Initializations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    Ha = HO["Adjoint"].appliedInXTo
+    #
+    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
+    else:
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
+    if Y.size != HXb.size:
+        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
+    if max(Y.shape) != max(HXb.shape):
+        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
+    #
+    if selfA._toStore("JacobianMatrixAtBackground"):
+        HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+        HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+        selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+    #
+    BI = B.getI()
+    RI = R.getI()
+    #
+    Xini = selfA._parameters["InitializationPoint"]
+    #
+    # Cost function definition
+    # ------------------------
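+    # Standard 3DVAR cost function and gradient (as coded below):
+    #   J(x)  = 1/2 (x-Xb)^T BI (x-Xb) + 1/2 (Y-H(x))^T RI (Y-H(x))
+    #   dJ/dx = BI (x-Xb) - H^T RI (Y-H(x))
+    # with BI = B^(-1) and RI = R^(-1).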
+    def CostFunction(x):
+        _X  = numpy.asarray(x).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
+        _Innovation = Y - _HX
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        #
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(x):
+        _X      = numpy.asarray(x).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
+        GradJb  = BI * (_X - Xb)
+        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the functional
+    # ------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        nfeval = Informations['funcalls']
+        rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Workaround for a TNC bug on the returned Minimum
+    # ------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        d  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( d )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( d )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
+    #
+    return 0
+
+# ==============================================================================
+def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    4DVAR
+    """
+    #
+    # Initializations
+    # ---------------
+    #
+    # Operators
+    Hm = HO["Direct"].appliedControledFormTo
+    Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    def Un(_step):
+        if U is not None:
+            if hasattr(U,"store") and 1<=_step<len(U) :
+                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                _Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                _Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            _Un = None
+        return _Un
+    def CmUn(_xn,_un):
+        if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
+            _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
+            _CmUn = (_Cm @ _un).reshape((-1,1))
+        else:
+            _CmUn = 0.
+        return _CmUn
+    #
+    # Remark: the observations are used starting from time step number 1,
+    # and are taken from Yo as indexed by these step numbers. Step 0 is
+    # therefore not used, since the first stage begins with the observation
+    # of step 1.
+    #
+    # Number of time steps equal to the number of observation steps
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+    else:
+        duration = 2
+    #
+    # Precompute the inverses of B and R
+    BI = B.getI()
+    RI = R.getI()
+    #
+    # Starting point of the optimization
+    Xini = selfA._parameters["InitializationPoint"]
+    #
+    # Cost function definition
+    # ------------------------
+    selfA.DirectCalculation = [None,] # Step 0 is not observed
+    selfA.DirectInnovation  = [None,] # Step 0 is not observed
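+    # Strong-constraint 4DVAR cost function (as coded below), the state being
+    # propagated by x_(k+1) = M(x_k, u_k) from the initial state x = x_0:
+    #   J(x) = 1/2 (x-Xb)^T BI (x-Xb)
+    #        + 1/2 sum_k (y_k - H(x_k))^T RI (y_k - H(x_k))
+    # Its gradient is computed by the backward adjoint recursion coded in
+    # GradientOfCostFunction, using the states and innovations stored during
+    # the forward sweep.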
+    def CostFunction(x):
+        _X  = numpy.asarray(x).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        selfA.DirectCalculation = [None,]
+        selfA.DirectInnovation  = [None,]
+        Jo  = 0.
+        _Xn = _X
+        for step in range(0,duration-1):
+            if hasattr(Y,"store"):
+                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
+            else:
+                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
+            _Un = Un(step)
+            #
+            # Evolution step
+            if selfA._parameters["EstimationOf"] == "State":
+                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                pass
+            #
+            if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+                _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
+            #
+            # Discrepancy with the observations
+            if selfA._parameters["EstimationOf"] == "State":
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
+            #
+            # Store the state
+            selfA.DirectCalculation.append( _Xn )
+            selfA.DirectInnovation.append( _YmHMX )
+            #
+            # Add the contribution to the observation functional
+            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
+        J = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(x):
+        _X      = numpy.asarray(x).reshape((-1,1))
+        GradJb  = BI * (_X - Xb)
+        GradJo  = 0.
+        for step in range(duration-1,0,-1):
+            # Retrieve the last stored evolution state
+            _Xn = selfA.DirectCalculation.pop()
+            # Retrieve the last stored innovation
+            _YmHMX = selfA.DirectInnovation.pop()
+            # Adjoint computations
+            Ha = HO["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ha = Ha.reshape(_Xn.size,_YmHMX.size) # ADAO & check shape
+            Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
+            Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
+            # Gradient computation by the adjoint state
+            GradJo = GradJo + Ha * (RI * _YmHMX) # For linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo                 # For linear Ma, equivalent to: Ma( (_Xn, GradJo) )
+        GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the functional
+    # ------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        nfeval = Informations['funcalls']
+        rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = selfA._parameters["Bounds"],
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Workaround for a TNC bug on the returned Minimum
+    # ------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    #
+    # Obtaining the analysis
+    # ----------------------
+    Xa = Minimum
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    # Additional computations and/or storage
+    # ---------------------------------------
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    #
+    return 0
+
+# ==============================================================================
+def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Standard Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    # Operators
+    # ---------
+    Ht = HO["Tangent"].asMatrix(Xb)
+    Ha = HO["Adjoint"].asMatrix(Xb)
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mt = EM["Tangent"].asMatrix(Xb)
+        Ma = EM["Adjoint"].asMatrix(Xb)
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            if hasattr(B,"asfullmatrix"):
+                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
+            else:
+                selfA.StoredVariables["APosterioriCovariance"].store( B )
+        selfA._setInternalState("seed", numpy.random.get_state())
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
+            Xn_predicted = Mt @ Xn
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
+        elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
+            # ---> By design, M = Id, Q = 0
+            Xn_predicted = Xn
+            Pn_predicted = Pn
+        #
+        if selfA._parameters["EstimationOf"] == "State":
+            HX_predicted = Ht @ Xn_predicted
+            _Innovation  = Ynpu - HX_predicted
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            HX_predicted = Ht @ Xn_predicted
+            _Innovation  = Ynpu - HX_predicted
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
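+        # Standard Kalman analysis step (as coded just below):
+        #   K  = Pf H^T (R + H Pf H^T)^(-1)
+        #   Xa = Pf state update: Xf + K d, with d the innovation
+        #   Pa = Pf - K H Pf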
+        Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
+        Xn = Xn_predicted + Kn * _Innovation
+        Pn = Pn_predicted - Kn * Ht * Pn_predicted
+        #
+        Xa = Xn # Pointers (no copy)
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Ht * Xa )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pn_predicted )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xn_predicted - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Final additional storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Unscented Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    #
+    L     = Xb.size
+    Alpha = selfA._parameters["Alpha"]
+    Beta  = selfA._parameters["Beta"]
+    if selfA._parameters["Kappa"] == 0:
+        if selfA._parameters["EstimationOf"] == "State":
+            Kappa = 0
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            Kappa = 3 - L
+    else:
+        Kappa = selfA._parameters["Kappa"]
+    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+    Gamma  = math.sqrt( L + Lambda )
+    #
+    Ww = []
+    Ww.append( 0. )
+    for i in range(2*L):
+        Ww.append( 1. / (2.*(L + Lambda)) )
+    #
+    Wm = numpy.array( Ww )
+    Wm[0] = Lambda / (L + Lambda)
+    Wc = numpy.array( Ww )
+    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
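+    # Standard unscented transform weights (matching the lines above):
+    #   Lambda = Alpha^2 (L+Kappa) - L,  Gamma = sqrt(L+Lambda)
+    #   Wm[0] = Lambda/(L+Lambda)
+    #   Wc[0] = Lambda/(L+Lambda) + (1 - Alpha^2 + Beta)
+    #   Wm[i] = Wc[i] = 1/(2(L+Lambda)) for i = 1..2L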
+    #
+    # Operators
+    Hm = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Observation duration and sizes
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Precompute the inverses of B and R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        if hasattr(B,"asfullmatrix"):
+            Pn = B.asfullmatrix(__n)
+        else:
+            Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
+        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+        nbSpts = 2*Xn.size+1
+        #
+        XEtnnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
+                if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEtnnpi = XEtnnpi + Cm @ Un
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # ---> By design, M = Id, Q = 0
+                XEtnnpi = Xnp[:,point]
+            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
+        #
+        Xncm = ( XEtnnp * Wm ).sum(axis=1)
+        #
+        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+        for point in range(nbSpts):
+            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
+        #
+        Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
+        #
+        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
+        #
+        Ynnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnpi = Hm( (Xnnp[:,point], None) )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnpi = Hm( (Xnnp[:,point], Un) )
+            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+        Ynnp = numpy.concatenate( Ynnp, axis=1 )
+        #
+        Yncm = ( Ynnp * Wm ).sum(axis=1)
+        #
+        Pyyn = R
+        Pxyn = 0.
+        for point in range(nbSpts):
+            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+        #
+        _Innovation  = Ynpu - Yncm.reshape((-1,1))
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pxyn * Pyyn.I
+        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
+        Pn = Pnm - Kn * Pyyn * Kn.T
+        #
+        Xa = Xn # Pointers (no copy)
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> with analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> with current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xncm )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xncm - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+        # ---> others
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Final additional storage of the optimum for parameter estimation
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
+
+# ==============================================================================
+def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    3DVAR variational analysis with no inversion of B
+    """
+    #
+    # Initializations
+    # ---------------
+    Hm = HO["Direct"].appliedTo
+    Ha = HO["Adjoint"].appliedInXTo
+    #
+    BT = B.getT()
+    RI = R.getI()
+    #
+    Xini = numpy.zeros(Xb.size)
+    #
+    # Cost function definition
+    # ------------------------
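+    # Change of control variable v, with x = Xb + B v (as coded below): the
+    # background term becomes Jb(v) = 1/2 v^T B^T v (equal to 1/2 v^T B v for
+    # symmetric B), so that B never has to be inverted; only RI = R^(-1) is
+    # required.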
+    def CostFunction(v):
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
+        if selfA._parameters["StoreInternalVariables"] or \
+            selfA._toStore("CurrentState") or \
+            selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentState"].store( _X )
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
+        _Innovation = Y - _HX
+        if selfA._toStore("SimulatedObservationAtCurrentState") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        #
+        Jb  = float( 0.5 * _V.T * (BT * _V) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+        J   = Jb + Jo
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+        selfA.StoredVariables["CostFunctionJb"].store( Jb )
+        selfA.StoredVariables["CostFunctionJo"].store( Jo )
+        selfA.StoredVariables["CostFunctionJ" ].store( J )
+        if selfA._toStore("IndexOfOptimum") or \
+            selfA._toStore("CurrentOptimum") or \
+            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+        if selfA._toStore("IndexOfOptimum"):
+            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+        if selfA._toStore("CurrentOptimum"):
+            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        return J
+    #
+    def GradientOfCostFunction(v):
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
+        GradJb  = BT * _V
+        # Chain rule through x = Xb + B v : the observation term contributes
+        # grad Jo = - B^T H'^T R^{-1} (Y - H(x)), hence the B^T factor here
+        GradJo  = - BT * Ha( (_X, RI * (Y - _HX)) )
+        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+        return GradJ
+    #
+    # Minimization of the functional
+    # ------------------------------
+    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+    #
+    if selfA._parameters["Minimizer"] == "LBFGSB":
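+        # For SciPy versions in this range, use the locally provided
+        # "lbfgsbhlt" variant of the L-BFGS-B wrapper instead of SciPy's own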
+        if "0.19" <= scipy.version.version <= "1.1.0":
+            import lbfgsbhlt as optimiseur
+        else:
+            import scipy.optimize as optimiseur
+        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
+            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            iprint      = selfA._parameters["optiprint"],
+            )
+        nfeval = Informations['funcalls']
+        rc     = Informations['warnflag']
+    elif selfA._parameters["Minimizer"] == "TNC":
+        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+            func        = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
+            maxfun      = selfA._parameters["MaximumNumberOfSteps"],
+            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
+            ftol        = selfA._parameters["CostDecrementTolerance"],
+            messages    = selfA._parameters["optmessages"],
+            )
+    elif selfA._parameters["Minimizer"] == "CG":
+        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "NCG":
+        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            avextol     = selfA._parameters["CostDecrementTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    elif selfA._parameters["Minimizer"] == "BFGS":
+        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+            f           = CostFunction,
+            x0          = Xini,
+            fprime      = GradientOfCostFunction,
+            args        = (),
+            maxiter     = selfA._parameters["MaximumNumberOfSteps"],
+            gtol        = selfA._parameters["GradientNormTolerance"],
+            disp        = selfA._parameters["optdisp"],
+            full_output = True,
+            )
+    else:
+        raise ValueError("Error in Minimizer name: %s"%selfA._parameters["Minimizer"])
+    #
+    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+    MinJ     = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+    #
+    # Correction to work around a TNC bug on the returned Minimum
+    # -----------------------------------------------------------
+    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+    else:
+        Minimum = Xb + B * Minimum.reshape((-1,1)) # Not @, since B may be a Covariance-type object
+    #
+    Xa = Minimum
+    #--------------------------
+    #
+    selfA.StoredVariables["Analysis"].store( Xa )
+    #
+    if selfA._toStore("OMA") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("SimulatedObservationAtOptimum"):
+        if selfA._toStore("SimulatedObservationAtCurrentState"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
+        else:
+            HXa = Hm( Xa )
+    #
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("JacobianMatrixAtOptimum") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
+        HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles") or \
+        selfA._toStore("KalmanGainAtOptimum"):
+        HaM = HO["Adjoint"].asMatrix(ValueForMethodForm = Xa)
+        HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
+    if selfA._toStore("APosterioriCovariance") or \
+        selfA._toStore("SimulationQuantiles"):
+        BI = B.getI()
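+        # Gauss-Newton approximation of the a posteriori covariance:
+        #   A ~ (B^{-1} + H'^T R^{-1} H')^{-1}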
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
+    if selfA._toStore("APosterioriCovariance"):
+        selfA.StoredVariables["APosterioriCovariance"].store( A )
+    if selfA._toStore("JacobianMatrixAtOptimum"):
+        selfA.StoredVariables["JacobianMatrixAtOptimum"].store( HtM )
+    if selfA._toStore("KalmanGainAtOptimum"):
+        if   (Y.size <= Xb.size): KG  = B * HaM * (R + numpy.dot(HtM, B * HaM)).I
+        elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
+        selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
+    #
+    # Additional computations and/or storage
+    # --------------------------------------
+    if selfA._toStore("Innovation") or \
+        selfA._toStore("SigmaObs2") or \
+        selfA._toStore("MahalanobisConsistency") or \
+        selfA._toStore("OMB"):
+        d  = Y - HXb
+    if selfA._toStore("Innovation"):
+        selfA.StoredVariables["Innovation"].store( d )
+    if selfA._toStore("BMA"):
+        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+    if selfA._toStore("OMA"):
+        selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
+    if selfA._toStore("OMB"):
+        selfA.StoredVariables["OMB"].store( d )
+    if selfA._toStore("SigmaObs2"):
+        TraceR = R.trace(Y.size)
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
+    if selfA._toStore("MahalanobisConsistency"):
+        selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
+    if selfA._toStore("SimulationQuantiles"):
+        QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
+    if selfA._toStore("SimulatedObservationAtBackground"):
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
+    if selfA._toStore("SimulatedObservationAtOptimum"):
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
-    return (xa, Ea, Sa)
+    return 0
 
 # ==============================================================================
 if __name__ == "__main__":