Updating module version information
[modules/adao.git] / src / daComposant / daCore / NumericObjects.py
index 2c862af4cb7ca74edb102471fc1fbd1cd362d0dd..aa6c683592e0c1a146b1ed356675970ae5d8defd 100644 (file)
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright (C) 2008-2021 EDF R&D
+# Copyright (C) 2008-2022 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -27,7 +27,7 @@ __author__ = "Jean-Philippe ARGAUD"
 
 import os, time, copy, types, sys, logging
 import math, numpy, scipy, scipy.optimize, scipy.version
-from daCore.BasicObjects import Operator
+from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
 from daCore.PlatformInfo import PlatformInfo
 mpr = PlatformInfo().MachinePrecision()
 mfp = PlatformInfo().MaximumPrecision()
@@ -37,7 +37,7 @@ mfp = PlatformInfo().MaximumPrecision()
 def ExecuteFunction( triplet ):
     assert len(triplet) == 3, "Incorrect number of arguments"
     X, xArgs, funcrepr = triplet
-    __X = numpy.asmatrix(numpy.ravel( X )).T
+    __X = numpy.ravel( X ).reshape((-1,1))
     __sys_path_tmp = sys.path ; sys.path.insert(0,funcrepr["__userFunction__path"])
     __module = __import__(funcrepr["__userFunction__modl"], globals(), locals(), [])
     __fonction = getattr(__module,funcrepr["__userFunction__name"])
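As an aside on the change above: the rewrite replaces the deprecated numpy.matrix idiom with a plain ndarray reshape. A minimal standalone sketch (toy values, independent of ADAO) of the equivalence:

import numpy
X = [1., 2., 3.]
__X = numpy.ravel( X ).reshape((-1,1))  # column vector, shape (3, 1)
assert __X.shape == (3, 1)              # same shape numpy.asmatrix(numpy.ravel(X)).T produced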
@@ -66,6 +66,7 @@ class FDApproximation(object):
             increment             = 0.01,
             dX                    = None,
             extraArguments        = None,
+            reducingMemoryUse     = False,
             avoidingRedundancy    = True,
             toleranceInRedundancy = 1.e-18,
             lenghtOfRedundancy    = -1,
@@ -75,6 +76,7 @@ class FDApproximation(object):
             ):
         self.__name = str(name)
         self.__extraArgs = extraArguments
+        #
         if mpEnabled:
             try:
                 import multiprocessing
@@ -88,12 +90,12 @@ class FDApproximation(object):
             self.__mpWorkers = None
         logging.debug("FDA Calculs en multiprocessing : %s (nombre de processus : %s)"%(self.__mpEnabled,self.__mpWorkers))
         #
-        if mfEnabled:
-            self.__mfEnabled = True
-        else:
-            self.__mfEnabled = False
+        self.__mfEnabled = bool(mfEnabled)
         logging.debug("FDA Calculs en multifonctions : %s"%(self.__mfEnabled,))
         #
+        self.__rmEnabled = bool(reducingMemoryUse)
+        logging.debug("FDA Calculs avec réduction mémoire : %s"%(self.__rmEnabled,))
+        #
         if avoidingRedundancy:
             self.__avoidRC = True
             self.__tolerBP = float(toleranceInRedundancy)
@@ -105,6 +107,9 @@ class FDApproximation(object):
             self.__listJPIN = [] # Jacobian Previous Calculated Increment Norms
         else:
             self.__avoidRC = False
+        logging.debug("FDA Calculs avec réduction des doublons : %s"%self.__avoidRC)
+        if self.__avoidRC:
+            logging.debug("FDA Tolérance de détermination des doublons : %.2e"%self.__tolerBP)
         #
         if self.__mpEnabled:
             if isinstance(Function,types.FunctionType):
@@ -149,10 +154,7 @@ class FDApproximation(object):
         if dX is None:
             self.__dX     = None
         else:
-            self.__dX     = numpy.asmatrix(numpy.ravel( dX )).T
-        logging.debug("FDA Reduction des doublons de calcul : %s"%self.__avoidRC)
-        if self.__avoidRC:
-            logging.debug("FDA Tolerance de determination des doublons : %.2e"%self.__tolerBP)
+            self.__dX     = numpy.ravel( dX )
 
     # ---------------------------------------------------------
     def __doublon__(self, e, l, n, v=None):
@@ -164,6 +166,29 @@ class FDApproximation(object):
                 break
         return __ac, __iac
 
+    # ---------------------------------------------------------
+    def __listdotwith__(self, __LMatrix, __dotWith = None, __dotTWith = None):
+        "Produit incrémental d'une matrice liste de colonnes avec un vecteur"
+        if not isinstance(__LMatrix, (list,tuple)):
+            raise TypeError("Columnwise list matrix has not the proper type: %s"%type(__LMatrix))
+        if __dotWith is not None:
+            __Idwx = numpy.ravel( __dotWith )
+            assert len(__LMatrix) == __Idwx.size, "Incorrect size of elements"
+            __Produit = numpy.zeros(__LMatrix[0].size)
+            for i, col in enumerate(__LMatrix):
+                __Produit += float(__Idwx[i]) * col
+            return __Produit
+        elif __dotTWith is not None:
+            __Idwy = numpy.ravel( __dotTWith )
+            assert __LMatrix[0].size == __Idwy.size, "Incorrect size of elements"
+            __Produit = numpy.zeros(len(__LMatrix))
+            for i, col in enumerate(__LMatrix):
+                __Produit[i] = float( __Idwy @ col )
+            return __Produit
+        else:
+            __Produit = None
+        return __Produit
+
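The new __listdotwith__ method multiplies a Jacobian stored as a list of columns by a vector without ever assembling the full matrix. A minimal standalone sketch (hypothetical toy data, not ADAO code) of the __dotWith accumulation:

import numpy
cols = [numpy.array([1., 0.]), numpy.array([0., 2.])]   # columns of J, kept as a list
v = numpy.array([3., 4.])
Jv = numpy.zeros(cols[0].size)
for i, col in enumerate(cols):
    Jv += float(v[i]) * col                             # same loop as the __dotWith branch
assert numpy.allclose(Jv, numpy.column_stack(cols) @ v) # matches the assembled product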
     # ---------------------------------------------------------
     def DirectOperator(self, X, **extraArgs ):
         """
@@ -176,17 +201,16 @@ class FDApproximation(object):
         if self.__mfEnabled:
             _HX = self.__userFunction( X, argsAsSerie = True )
         else:
-            _X = numpy.asmatrix(numpy.ravel( X )).T
-            _HX = numpy.ravel(self.__userFunction( _X ))
+            _HX = numpy.ravel(self.__userFunction( numpy.ravel(X) ))
         #
         return _HX
 
     # ---------------------------------------------------------
-    def TangentMatrix(self, X ):
+    def TangentMatrix(self, X, dotWith = None, dotTWith = None ):
         """
         Calcul de l'opérateur tangent comme la Jacobienne par différences finies,
         c'est-à-dire le gradient de H en X. On utilise des différences finies
-        directionnelles autour du point X. X est un numpy.matrix.
+        directionnelles autour du point X. X est un numpy.ndarray.
 
         Différences finies centrées (approximation d'ordre 2):
         1/ Pour chaque composante i de X, on ajoute et on enlève la perturbation
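The docstring above describes the order-2 centered scheme; a standalone sketch (hypothetical operator H, not part of the module) of how one Jacobian column is approximated:

import numpy
H = lambda X: numpy.array([X[0]**2, X[0]*X[1]])   # toy operator
X, i, dXi = numpy.array([1., 2.]), 0, 1.e-6
e = numpy.zeros_like(X); e[i] = 1.
col_i = (H(X + dXi*e) - H(X - dXi*e)) / (2.*dXi)  # column i of the Jacobian
assert numpy.allclose(col_i, [2., 2.], atol=1.e-5)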
@@ -214,12 +238,14 @@ class FDApproximation(object):
         if X is None or len(X)==0:
             raise ValueError("Nominal point X for approximate derivatives can not be None or void (given X: %s)."%(str(X),))
         #
-        _X = numpy.asmatrix(numpy.ravel( X )).T
+        _X = numpy.ravel( X )
         #
         if self.__dX is None:
             _dX  = self.__increment * _X
         else:
-            _dX = numpy.asmatrix(numpy.ravel( self.__dX )).T
+            _dX = numpy.ravel( self.__dX )
+        assert _X.size == _dX.size, "Inconsistent dX increment size with respect to the X one"
         #
         if (_dX == 0.).any():
             moyenne = _dX.mean()
@@ -234,11 +260,16 @@ class FDApproximation(object):
             __bidon, __alreadyCalculatedI = self.__doublon__(_dX, self.__listJPCI, self.__listJPIN, None)
             if __alreadyCalculatedP == __alreadyCalculatedI > -1:
                 __alreadyCalculated, __i = True, __alreadyCalculatedP
-                logging.debug("FDA Cas J déja calculé, récupération du doublon %i"%__i)
+                logging.debug("FDA Cas J déjà calculé, récupération du doublon %i"%__i)
         #
         if __alreadyCalculated:
             logging.debug("FDA   Calcul Jacobienne (par récupération du doublon %i)"%__i)
             _Jacobienne = self.__listJPCR[__i]
+            logging.debug("FDA Fin du calcul de la Jacobienne")
+            if dotWith is not None:
+                return numpy.dot(_Jacobienne,   numpy.ravel( dotWith ))
+            elif dotTWith is not None:
+                return numpy.dot(_Jacobienne.T, numpy.ravel( dotTWith ))
         else:
             logging.debug("FDA   Calcul Jacobienne (explicite)")
             if self.__centeredDF:
@@ -252,9 +283,9 @@ class FDApproximation(object):
                     _jobs = []
                     for i in range( len(_dX) ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
                         _jobs.append( (_X_plus_dXi,  self.__extraArgs, funcrepr) )
@@ -274,9 +305,9 @@ class FDApproximation(object):
                     _xserie = []
                     for i in range( len(_dX) ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
                         _xserie.append( _X_plus_dXi )
@@ -292,9 +323,9 @@ class FDApproximation(object):
                     _Jacobienne  = []
                     for i in range( _dX.size ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
-                        _X_moins_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_moins_dXi    = numpy.array( _X, dtype=float )
                         _X_moins_dXi[i] = _X[i] - _dXi
                         #
                         _HX_plus_dXi    = self.DirectOperator( _X_plus_dXi )
@@ -311,9 +342,9 @@ class FDApproximation(object):
                         "__userFunction__name" : self.__userFunction__name,
                     }
                     _jobs = []
-                    _jobs.append( (_X.A1, self.__extraArgs, funcrepr) )
+                    _jobs.append( (_X, self.__extraArgs, funcrepr) )
                     for i in range( len(_dX) ):
-                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i] = _X[i] + _dX[i]
                         #
                         _jobs.append( (_X_plus_dXi, self.__extraArgs, funcrepr) )
@@ -332,9 +363,9 @@ class FDApproximation(object):
                     #
                 elif self.__mfEnabled:
                     _xserie = []
-                    _xserie.append( _X.A1 )
+                    _xserie.append( _X )
                     for i in range( len(_dX) ):
-                        _X_plus_dXi    = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi    = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i] = _X[i] + _dX[i]
                         #
                         _xserie.append( _X_plus_dXi )
@@ -352,30 +383,35 @@ class FDApproximation(object):
                     _HX = self.DirectOperator( _X )
                     for i in range( _dX.size ):
                         _dXi            = _dX[i]
-                        _X_plus_dXi     = numpy.array( _X.A1, dtype=float )
+                        _X_plus_dXi     = numpy.array( _X, dtype=float )
                         _X_plus_dXi[i]  = _X[i] + _dXi
                         #
                         _HX_plus_dXi = self.DirectOperator( _X_plus_dXi )
                         #
                         _Jacobienne.append( numpy.ravel(( _HX_plus_dXi - _HX ) / _dXi) )
-                #
             #
-            _Jacobienne = numpy.asmatrix( numpy.vstack( _Jacobienne ) ).T
-            if self.__avoidRC:
-                if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
-                while len(self.__listJPCP) > self.__lenghtRJ:
-                    self.__listJPCP.pop(0)
-                    self.__listJPCI.pop(0)
-                    self.__listJPCR.pop(0)
-                    self.__listJPPN.pop(0)
-                    self.__listJPIN.pop(0)
-                self.__listJPCP.append( copy.copy(_X) )
-                self.__listJPCI.append( copy.copy(_dX) )
-                self.__listJPCR.append( copy.copy(_Jacobienne) )
-                self.__listJPPN.append( numpy.linalg.norm(_X) )
-                self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
-        #
-        logging.debug("FDA Fin du calcul de la Jacobienne")
+            if (dotWith is not None) or (dotTWith is not None):
+                __Produit = self.__listdotwith__(_Jacobienne, dotWith, dotTWith)
+            else:
+                __Produit = None
+            if __Produit is None or self.__avoidRC:
+                _Jacobienne = numpy.transpose( numpy.vstack( _Jacobienne ) )
+                if self.__avoidRC:
+                    if self.__lenghtRJ < 0: self.__lenghtRJ = 2 * _X.size
+                    while len(self.__listJPCP) > self.__lenghtRJ:
+                        self.__listJPCP.pop(0)
+                        self.__listJPCI.pop(0)
+                        self.__listJPCR.pop(0)
+                        self.__listJPPN.pop(0)
+                        self.__listJPIN.pop(0)
+                    self.__listJPCP.append( copy.copy(_X) )
+                    self.__listJPCI.append( copy.copy(_dX) )
+                    self.__listJPCR.append( copy.copy(_Jacobienne) )
+                    self.__listJPPN.append( numpy.linalg.norm(_X) )
+                    self.__listJPIN.append( numpy.linalg.norm(_Jacobienne) )
+            logging.debug("FDA Fin du calcul de la Jacobienne")
+            if __Produit is not None:
+                return __Produit
         #
         return _Jacobienne
 
@@ -388,28 +424,27 @@ class FDApproximation(object):
         ne doivent pas être données ici à la fonction utilisateur.
         """
         if self.__mfEnabled:
-            assert len(paire) == 1, "Incorrect lenght of arguments"
+            assert len(paire) == 1, "Incorrect length of arguments"
             _paire = paire[0]
             assert len(_paire) == 2, "Incorrect number of arguments"
         else:
             assert len(paire) == 2, "Incorrect number of arguments"
             _paire = paire
         X, dX = _paire
-        _Jacobienne = self.TangentMatrix( X )
         if dX is None or len(dX) == 0:
             #
             # Calcul de la forme matricielle si le second argument est None
             # -------------------------------------------------------------
+            _Jacobienne = self.TangentMatrix( X )
             if self.__mfEnabled: return [_Jacobienne,]
             else:                return _Jacobienne
         else:
             #
             # Calcul de la valeur linéarisée de H en X appliqué à dX
             # ------------------------------------------------------
-            _dX = numpy.asmatrix(numpy.ravel( dX )).T
-            _HtX = numpy.dot(_Jacobienne, _dX)
-            if self.__mfEnabled: return [_HtX.A1,]
-            else:                return _HtX.A1
+            _HtX = self.TangentMatrix( X, dotWith = dX )
+            if self.__mfEnabled: return [_HtX,]
+            else:                return _HtX
 
     # ---------------------------------------------------------
     def AdjointOperator(self, paire, **extraArgs ):
@@ -420,28 +455,27 @@ class FDApproximation(object):
         ne doivent pas être données ici à la fonction utilisateur.
         """
         if self.__mfEnabled:
-            assert len(paire) == 1, "Incorrect lenght of arguments"
+            assert len(paire) == 1, "Incorrect length of arguments"
             _paire = paire[0]
             assert len(_paire) == 2, "Incorrect number of arguments"
         else:
             assert len(paire) == 2, "Incorrect number of arguments"
             _paire = paire
         X, Y = _paire
-        _JacobienneT = self.TangentMatrix( X ).T
         if Y is None or len(Y) == 0:
             #
             # Calcul de la forme matricielle si le second argument est None
             # -------------------------------------------------------------
+            _JacobienneT = self.TangentMatrix( X ).T
             if self.__mfEnabled: return [_JacobienneT,]
             else:                return _JacobienneT
         else:
             #
             # Calcul de la valeur de l'adjoint en X appliqué à Y
             # --------------------------------------------------
-            _Y = numpy.asmatrix(numpy.ravel( Y )).T
-            _HaY = numpy.dot(_JacobienneT, _Y)
-            if self.__mfEnabled: return [_HaY.A1,]
-            else:                return _HaY.A1
+            _HaY = self.TangentMatrix( X, dotTWith = Y )
+            if self.__mfEnabled: return [_HaY,]
+            else:                return _HaY
 
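TangentOperator and AdjointOperator now delegate the products to TangentMatrix; whatever the storage, the pair must satisfy the duality <J dX, Y> = <dX, J^T Y>. A standalone check of that identity (random toy matrix, assumptions mine):

import numpy
rng = numpy.random.default_rng(0)
J  = rng.standard_normal((4, 3))
dX = rng.standard_normal(3)
Y  = rng.standard_normal(4)
assert numpy.isclose((J @ dX) @ Y, dX @ (J.T @ Y))  # tangent/adjoint duality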
 # ==============================================================================
 def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
@@ -452,12 +486,12 @@ def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
     #
     if _bgcovariance is None:
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     else:
         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
     #
-    return BackgroundEnsemble
+    return _Perturbations
 
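A minimal standalone sketch (toy sizes, independent of ADAO) of the construction in EnsembleOfCenteredPerturbations: tile the center column-wise and add zero-mean Gaussian draws with the given covariance:

import numpy
center, cov, m = numpy.array([[0.], [1.]]), numpy.eye(2), 5
Z = numpy.random.multivariate_normal(numpy.zeros(2), cov, size=m).T
ensemble = numpy.tile( center, m ) + Z   # shape (2, 5), one member per column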
 # ==============================================================================
 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
@@ -479,29 +513,29 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi
     if _nbmembers < 1:
         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
     if _bgcovariance is None:
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     else:
         if _withSVD:
-            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
             _nbctl = _bgcenter.size
             if _nbmembers > _nbctl:
                 _Z = numpy.concatenate((numpy.dot(
-                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
+                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
             else:
-                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
+                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-            BackgroundEnsemble = _bgcenter + _Zca
+            _Perturbations = _bgcenter + _Zca
         else:
             if max(abs(_bgcovariance.flatten())) > 0:
                 _nbctl = _bgcenter.size
                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-                BackgroundEnsemble = _bgcenter + _Zca
+                _Perturbations = _bgcenter + _Zca
             else:
-                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     #
-    return BackgroundEnsemble
+    return _Perturbations
 
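A property sketch (toy data; the simple centering below is only a stand-in for __CenteredRandomAnomalies) of why the ensemble keeps _bgcenter as its mean: the anomalies have zero column-mean by construction:

import numpy
Z = numpy.random.default_rng(1).standard_normal((4, 6))
Zca = Z - Z.mean(axis=1, keepdims=True)   # stand-in for __CenteredRandomAnomalies
assert numpy.allclose(Zca.mean(axis=1), 0.)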
 # ==============================================================================
 def EnsembleMean( __Ensemble ):
@@ -649,6 +683,31 @@ def CovarianceInflation(
     #
     return OutputCovOrEns
 
+# ==============================================================================
+def HessienneEstimation(nb, HaM, HtM, BI, RI):
+    "Estimation de la Hessienne"
+    #
+    HessienneI = []
+    for i in range(int(nb)):
+        _ee    = numpy.zeros((nb,1))
+        _ee[i] = 1.
+        _HtEE  = numpy.dot(HtM,_ee).reshape((-1,1))
+        HessienneI.append( numpy.ravel( BI * _ee + HaM * (RI * _HtEE) ) )
+    #
+    __HessienneI = numpy.array( HessienneI )
+    if min(__HessienneI.shape) != max(__HessienneI.shape):
+        raise ValueError("The a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator, please check it."%(str(__HessienneI.shape),))
+    A = numpy.linalg.inv( __HessienneI )
+    #
+    if (numpy.diag(A) < 0).any():
+        raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it.")
+    if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug
+        try:
+            numpy.linalg.cholesky( A )
+        except numpy.linalg.LinAlgError:
+            raise ValueError("The a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator.")
+    #
+    return A
+
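HessienneEstimation applies H = B^-1 + Ht^T R^-1 Ht to each basis vector e_i, building the Hessian row by row (equivalently column by column, H being symmetric), then inverts it. A standalone sketch with toy matrices (assumptions mine, not ADAO data):

import numpy
BI, RI = numpy.eye(2), numpy.eye(2)
HtM = numpy.array([[1., 0.], [1., 1.]]); HaM = HtM.T
rows = []
for i in range(2):
    e = numpy.zeros((2,1)); e[i] = 1.
    rows.append( numpy.ravel( BI @ e + HaM @ (RI @ (HtM @ e)) ) )
A = numpy.linalg.inv(numpy.array( rows ))
assert numpy.allclose(A, numpy.linalg.inv(BI + HaM @ RI @ HtM))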
 # ==============================================================================
 def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     "Estimation des quantiles a posteriori (selfA est modifié)"
@@ -662,37 +721,36 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     else:
         LBounds = None
     if LBounds is not None:
-        LBounds = numpy.asmatrix(ForceNumericBounds( LBounds ))
+        LBounds = ForceNumericBounds( LBounds )
+    _Xa = numpy.ravel(Xa)
     #
     # Échantillonnage des états
     YfQ  = None
     EXr  = None
-    if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
-        HXa  = numpy.matrix(numpy.ravel( HXa )).T
     for i in range(nbsamples):
-        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
-            dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
+        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
+            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
             if LBounds is not None: # "EstimateProjection" par défaut
-                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
-                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
-            dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
-            Yr = HXa + dYr
-            if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
+                dXr = numpy.max(numpy.hstack((dXr, LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))), axis=1).reshape((-1,1))
+                dXr = numpy.min(numpy.hstack((dXr, LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))), axis=1).reshape((-1,1))
+            dYr = HtM @ dXr
+            Yr = HXa.reshape((-1,1)) + dYr
+            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
-            Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
+            Xr = numpy.random.multivariate_normal(_Xa,A)
             if LBounds is not None: # "EstimateProjection" par défaut
-                Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
-                Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
-            Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
+                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
+            Yr = numpy.asarray(Hm( Xr ))
         else:
             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
         #
         if YfQ is None:
-            YfQ = Yr
-            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
+            YfQ = Yr.reshape((-1,1))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
         else:
-            YfQ = numpy.hstack((YfQ,Yr))
-            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
+            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
     #
     # Extraction des quantiles
     YfQ.sort(axis=-1)
@@ -700,11 +758,12 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     for quantile in selfA._parameters["Quantiles"]:
         if not (0. <= float(quantile) <= 1.): continue
         indice = int(nbsamples * float(quantile) - 1./nbsamples)
-        if YQ is None: YQ = YfQ[:,indice]
-        else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
-    selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
+        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
+    if YQ is not None: # Liste non vide de quantiles
+        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
     if selfA._toStore("SampledStateForQuantiles"):
-        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
+        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
     #
     return 0
 
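The quantile extraction above sorts the sampled outputs along the sample axis and reads off the column at the computed index. A standalone sketch (toy numbers) using the same index formula:

import numpy
YfQ = numpy.sort(numpy.array([[3., 1., 2.]]), axis=-1)   # one output component, 3 samples
nbsamples, quantile = 3, 0.5
indice = int(nbsamples * float(quantile) - 1./nbsamples) # -> 1 here
assert YfQ[0,indice] == 2.                               # the empirical median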
@@ -716,17 +775,323 @@ def ForceNumericBounds( __Bounds ):
     # Converti toutes les bornes individuelles None à +/- l'infini
     __Bounds = numpy.asarray( __Bounds, dtype=float )
     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
-        raise ValueError("Incorrectly defined bounds data")
-    __Bounds[numpy.isnan(__Bounds)[:,0],0] = -sys.float_info.max
-    __Bounds[numpy.isnan(__Bounds)[:,1],1] =  sys.float_info.max
+        raise ValueError("Incorrectly shaped bounds data")
+    __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max
+    __Bounds[numpy.isnan(__Bounds[:,1]),1] =  sys.float_info.max
     return __Bounds
 
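A toy illustration (standalone, values mine) of the NaN handling in ForceNumericBounds: one-sided None bounds become +/- the largest float, so optimizers always receive numeric bounds:

import sys, numpy
B = numpy.asarray([[0., None], [None, 1.]], dtype=float)  # None -> nan
B[numpy.isnan(B[:,0]),0] = -sys.float_info.max
B[numpy.isnan(B[:,1]),1] =  sys.float_info.max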
 # ==============================================================================
 def RecentredBounds( __Bounds, __Center):
+    "Recentre les bornes autour de 0, sauf si globalement None"
     # Conserve une valeur par défaut à None s'il n'y a pas de bornes
     if __Bounds is None: return None
     # Recentre les valeurs numériques de bornes
-    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).transpose((-1,1))
+    return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))
+
+# ==============================================================================
+def ApplyBounds( __Vector, __Bounds, __newClip = True):
+    "Applique des bornes numériques à un point"
+    # Conserve une valeur par défaut s'il n'y a pas de bornes
+    if __Bounds is None: return __Vector
+    #
+    if not isinstance(__Vector, numpy.ndarray): # Must be an ndarray
+        raise ValueError("Incorrect array definition of vector data")
+    if not isinstance(__Bounds, numpy.ndarray): # Must be an ndarray
+        raise ValueError("Incorrect array definition of bounds data")
+    if 2*__Vector.size != __Bounds.size: # Must be a 2-column array of vector length
+        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
+    if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
+        raise ValueError("Incorrectly shaped bounds data")
+    #
+    if __newClip:
+        __Vector = __Vector.clip(
+            __Bounds[:,0].reshape(__Vector.shape),
+            __Bounds[:,1].reshape(__Vector.shape),
+            )
+    else:
+        __Vector = numpy.max(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,0])),axis=1)
+        __Vector = numpy.min(numpy.hstack((__Vector.reshape((-1,1)),numpy.asmatrix(__Bounds)[:,1])),axis=1)
+        __Vector = numpy.asarray(__Vector)
+    #
+    return __Vector
+
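A short sketch (hypothetical values) of the __newClip branch of ApplyBounds: numpy.clip with per-component lower/upper columns is the vectorized form of the older hstack/max/min construction:

import numpy
V = numpy.array([[-2.], [0.5], [3.]])
Bnds = numpy.array([[-1., 1.]] * 3)
Vc = V.clip(Bnds[:,0].reshape(V.shape), Bnds[:,1].reshape(V.shape))
assert numpy.allclose(Vc.ravel(), [-1., 0.5, 1.])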
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+    "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+    #
+    Xf = EnsembleMean( __EnXf )
+    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
+    Pf = (1 - __Betaf) * __B + __Betaf * Pf
+    #
+    selfB = PartialAlgorithm("3DVAR")
+    selfB._parameters["Minimizer"] = "LBFGSB"
+    selfB._parameters["MaximumNumberOfSteps"] = 15000
+    selfB._parameters["CostDecrementTolerance"] = 1.e-7
+    selfB._parameters["ProjectedGradientTolerance"] = -1
+    selfB._parameters["GradientNormTolerance"] = 1.e-05
+    selfB._parameters["StoreInternalVariables"] = False
+    selfB._parameters["optiprint"] = -1
+    selfB._parameters["optdisp"] = 0
+    selfB._parameters["Bounds"] = None
+    selfB._parameters["InitializationPoint"] = Xf
+    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+    del selfB
+    #
+    return Xa + EnsembleOfAnomalies( __EnXn )
+
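The hybrid step above blends the static and ensemble covariances before the 3DVAR solve; a one-line sketch (toy matrices) of that convex combination, with __Betaf in [0, 1]:

import numpy
B, Pf, betaf = numpy.eye(2), numpy.array([[2., .5], [.5, 1.]]), 0.25
P_hybrid = (1 - betaf) * B + betaf * Pf   # (1 - beta) * static + beta * ensemble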
+# ==============================================================================
+def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+    """
+    Constrained Unscented Kalman Filter
+    """
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA._parameters["StoreInternalVariables"] = True
+    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
+    #
+    L     = Xb.size
+    Alpha = selfA._parameters["Alpha"]
+    Beta  = selfA._parameters["Beta"]
+    if selfA._parameters["Kappa"] == 0:
+        if selfA._parameters["EstimationOf"] == "State":
+            Kappa = 0
+        elif selfA._parameters["EstimationOf"] == "Parameters":
+            Kappa = 3 - L
+    else:
+        Kappa = selfA._parameters["Kappa"]
+    Lambda = float( Alpha**2 ) * ( L + Kappa ) - L
+    Gamma  = math.sqrt( L + Lambda )
+    #
+    Ww = []
+    Ww.append( 0. )
+    for i in range(2*L):
+        Ww.append( 1. / (2.*(L + Lambda)) )
+    #
+    Wm = numpy.array( Ww )
+    Wm[0] = Lambda / (L + Lambda)
+    Wc = numpy.array( Ww )
+    Wc[0] = Lambda / (L + Lambda) + (1. - Alpha**2 + Beta)
+    #
+    # Opérateurs
+    Hm = HO["Direct"].appliedControledFormTo
+    #
+    if selfA._parameters["EstimationOf"] == "State":
+        Mm = EM["Direct"].appliedControledFormTo
+    #
+    if CM is not None and "Tangent" in CM and U is not None:
+        Cm = CM["Tangent"].asMatrix(Xb)
+    else:
+        Cm = None
+    #
+    # Durée d'observation et tailles
+    if hasattr(Y,"stepnumber"):
+        duration = Y.stepnumber()
+        __p = numpy.cumprod(Y.shape())[-1]
+    else:
+        duration = 2
+        __p = numpy.array(Y).size
+    #
+    # Précalcul des inversions de B et R
+    if selfA._parameters["StoreInternalVariables"] \
+        or selfA._toStore("CostFunctionJ") \
+        or selfA._toStore("CostFunctionJb") \
+        or selfA._toStore("CostFunctionJo") \
+        or selfA._toStore("CurrentOptimum") \
+        or selfA._toStore("APosterioriCovariance"):
+        BI = B.getI()
+        RI = R.getI()
+    #
+    __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    #
+    if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
+        Xn = Xb
+        if hasattr(B,"asfullmatrix"):
+            Pn = B.asfullmatrix(__n)
+        else:
+            Pn = B
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( Xb )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+    elif selfA._parameters["nextStep"]:
+        Xn = selfA._getInternalState("Xn")
+        Pn = selfA._getInternalState("Pn")
+    #
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        XaMin            = Xn
+        previousJMinimum = numpy.finfo(float).max
+    #
+    for step in range(duration-1):
+        if hasattr(Y,"store"):
+            Ynpu = numpy.ravel( Y[step+1] ).reshape((__p,1))
+        else:
+            Ynpu = numpy.ravel( Y ).reshape((__p,1))
+        #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
+        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
+        Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
+        nbSpts = 2*Xn.size+1
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            for point in range(nbSpts):
+                Xnp[:,point] = ApplyBounds( Xnp[:,point], selfA._parameters["Bounds"] )
+        #
+        XEtnnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
+                if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+                    Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
+                    XEtnnpi = XEtnnpi + Cm @ Un
+                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+                    XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                # --- > Par principe, M = Id, Q = 0
+                XEtnnpi = Xnp[:,point]
+            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
+        #
+        Xncm = ( XEtnnp * Wm ).sum(axis=1)
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xncm = ApplyBounds( Xncm, selfA._parameters["Bounds"] )
+        #
+        if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
+        elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
+        for point in range(nbSpts):
+            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
+        #
+        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
+            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.real(scipy.linalg.sqrtm(Pnm))
+        else:
+            Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
+        #
+        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            for point in range(nbSpts):
+                Xnnp[:,point] = ApplyBounds( Xnnp[:,point], selfA._parameters["Bounds"] )
+        #
+        Ynnp = []
+        for point in range(nbSpts):
+            if selfA._parameters["EstimationOf"] == "State":
+                Ynnpi = Hm( (Xnnp[:,point], None) )
+            elif selfA._parameters["EstimationOf"] == "Parameters":
+                Ynnpi = Hm( (Xnnp[:,point], Un) )
+            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+        Ynnp = numpy.concatenate( Ynnp, axis=1 )
+        #
+        Yncm = ( Ynnp * Wm ).sum(axis=1)
+        #
+        Pyyn = R
+        Pxyn = 0.
+        for point in range(nbSpts):
+            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+        #
+        _Innovation  = Ynpu - Yncm.reshape((-1,1))
+        if selfA._parameters["EstimationOf"] == "Parameters":
+            if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
+                _Innovation = _Innovation - Cm @ Un
+        #
+        Kn = Pxyn * Pyyn.I
+        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
+        Pn = Pnm - Kn * Pyyn * Kn.T
+        #
+        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
+        #
+        Xa = Xn # Pointeurs
+        #--------------------------
+        selfA._setInternalState("Xn", Xn)
+        selfA._setInternalState("Pn", Pn)
+        #--------------------------
+        #
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        # ---> avec analysis
+        selfA.StoredVariables["Analysis"].store( Xa )
+        if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( Hm((Xa, Un)) )
+        if selfA._toStore("InnovationAtCurrentAnalysis"):
+            selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+        # ---> avec current state
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CurrentState"):
+            selfA.StoredVariables["CurrentState"].store( Xn )
+        if selfA._toStore("ForecastState"):
+            selfA.StoredVariables["ForecastState"].store( Xncm )
+        if selfA._toStore("ForecastCovariance"):
+            selfA.StoredVariables["ForecastCovariance"].store( Pnm )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( Xncm - Xa )
+        if selfA._toStore("InnovationAtCurrentState"):
+            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+        if selfA._toStore("SimulatedObservationAtCurrentState") \
+            or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Yncm )
+        # ---> autres
+        if selfA._parameters["StoreInternalVariables"] \
+            or selfA._toStore("CostFunctionJ") \
+            or selfA._toStore("CostFunctionJb") \
+            or selfA._toStore("CostFunctionJo") \
+            or selfA._toStore("CurrentOptimum") \
+            or selfA._toStore("APosterioriCovariance"):
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
+            J   = Jb + Jo
+            selfA.StoredVariables["CostFunctionJb"].store( Jb )
+            selfA.StoredVariables["CostFunctionJo"].store( Jo )
+            selfA.StoredVariables["CostFunctionJ" ].store( J )
+            #
+            if selfA._toStore("IndexOfOptimum") \
+                or selfA._toStore("CurrentOptimum") \
+                or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+                or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+                or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+            if selfA._toStore("IndexOfOptimum"):
+                selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+            if selfA._toStore("CurrentOptimum"):
+                selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+            if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+                selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+            if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+            if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+            if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+                selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
+        if selfA._parameters["EstimationOf"] == "Parameters" \
+            and J < previousJMinimum:
+            previousJMinimum    = J
+            XaMin               = Xa
+            if selfA._toStore("APosterioriCovariance"):
+                covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+    #
+    # Stockage final supplémentaire de l'optimum en estimation de paramètres
+    # ----------------------------------------------------------------------
+    if selfA._parameters["EstimationOf"] == "Parameters":
+        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+        selfA.StoredVariables["Analysis"].store( XaMin )
+        if selfA._toStore("APosterioriCovariance"):
+            selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+        if selfA._toStore("BMA"):
+            selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+    #
+    return 0
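A compact standalone check (toy dimensions, assumptions mine) of the unscented weights defined at the top of c2ukf: with L state components and 2L+1 sigma points, the mean weights must sum to 1:

import numpy
L_, Alpha, Kappa = 3, 1.e-2, 0
Lambda = float( Alpha**2 ) * ( L_ + Kappa ) - L_
Wm = numpy.full(2*L_ + 1, 1. / (2.*(L_ + Lambda)))
Wm[0] = Lambda / (L_ + Lambda)
assert numpy.isclose(Wm.sum(), 1.)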
 
 # ==============================================================================
 def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
@@ -767,6 +1132,7 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         RI = R.getI()
     #
     __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = Xb
@@ -815,23 +1181,21 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Un = None
         #
         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
         #
         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
-            Pn_predicted = Q + Mt * Pn * Ma
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = Xn
             Pn_predicted = Pn
         #
         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn_predicted = numpy.max(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-            Xn_predicted = numpy.min(numpy.hstack((Xn_predicted,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+            Xn_predicted = ApplyBounds( Xn_predicted, selfA._parameters["Bounds"] )
         #
         if selfA._parameters["EstimationOf"] == "State":
             HX_predicted = numpy.ravel( H( (Xn_predicted, None) ) ).reshape((__p,1))
@@ -840,15 +1204,14 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
         Pn = Pn_predicted - Kn * Ht * Pn_predicted
         #
         if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+            Xn = ApplyBounds( Xn, selfA._parameters["Bounds"] )
         #
         Xa = Xn # Pointeurs
         #--------------------------
@@ -997,11 +1360,11 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -1017,7 +1380,7 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm
                     returnSerieAsArrayMatrix = True )
                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                    EZ = EZ + Cm * Un
+                    EZ = EZ + Cm @ Un
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 # --- > Par principe, M = Id, Q = 0
                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
@@ -1073,7 +1436,10 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm
     return 0
 
 # ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula",
+    Hybrid=None,
+    ):
     """
     Ensemble-Transform EnKF
     """
@@ -1115,6 +1481,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     #
     __n = Xb.size
     __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
@@ -1128,8 +1496,6 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
     elif selfA._parameters["nextStep"]:
         Xn = selfA._getInternalState("Xn")
     #
-    previousJMinimum = numpy.finfo(float).max
-    #
     for step in range(duration-1):
         numpy.random.set_state(selfA._getInternalState("seed"))
         if hasattr(Y,"store"):
@@ -1139,11 +1505,11 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -1163,7 +1529,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -1172,8 +1538,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
         #
         # Anomalies
         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
@@ -1261,7 +1627,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T * RI * _A
+                _Jo = 0.5 * _A.T * (RI * _A)
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                 _J  = _Jo + _Jb
                 return float(_J)
@@ -1332,7 +1698,11 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -1346,7 +1716,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -1378,8 +1748,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -1466,6 +1836,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         RI = R.getI()
     #
     __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = Xb
@@ -1517,8 +1888,8 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
-            Pn_predicted = Q + Mt * Pn * Ma
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = Xn
@@ -1531,7 +1902,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
@@ -1659,6 +2030,8 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
     #
     __n = Xb.size
     __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
@@ -1674,8 +2047,6 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
     elif selfA._parameters["nextStep"]:
         Xn = selfA._getInternalState("Xn")
     #
-    previousJMinimum = numpy.finfo(float).max
-    #
     for step in range(duration-1):
         numpy.random.set_state(selfA._getInternalState("seed"))
         if hasattr(Y,"store"):
@@ -1685,11 +2056,11 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -1764,7 +2135,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -1778,7 +2149,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -1810,8 +2181,8 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -1844,6 +2215,9 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Additional final storage of the optimum in parameter estimation
     # ----------------------------------------------------------------
@@ -1865,18 +2239,12 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     # Initialisations
     # ---------------
-    #
-    # Opérateur non-linéaire pour la boucle externe
     Hm = HO["Direct"].appliedTo
     #
-    # Précalcul des inversions de B et R
     BI = B.getI()
     RI = R.getI()
     #
-    # Point de démarrage de l'optimisation
-    Xini = selfA._parameters["InitializationPoint"]
-    #
-    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+    HXb = numpy.asarray(Hm( Xb )).reshape((-1,1))
     Innovation = Y - HXb
     #
     # Outer Loop
@@ -1884,7 +2252,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     iOuter = 0
     J      = 1./mpr
     DeltaJ = 1./mpr
-    Xr     = Xini.reshape((-1,1))
+    Xr     = numpy.asarray(selfA._parameters["InitializationPoint"]).reshape((-1,1))
     while abs(DeltaJ) >= selfA._parameters["CostDecrementTolerance"] and iOuter <= selfA._parameters["MaximumNumberOfSteps"]:
         #
         # Inner Loop
@@ -1895,13 +2263,12 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         # Definition of the cost function
         # -------------------------------
         def CostFunction(dx):
-            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
+            _dX  = numpy.asarray(dx).reshape((-1,1))
             if selfA._parameters["StoreInternalVariables"] or \
                 selfA._toStore("CurrentState") or \
                 selfA._toStore("CurrentOptimum"):
                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
-            _HdX = Ht * _dX
-            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+            _HdX = (Ht @ _dX).reshape((-1,1))
             _dInnovation = Innovation - _HdX
             if selfA._toStore("SimulatedObservationAtCurrentState") or \
                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -1909,8 +2276,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             if selfA._toStore("InnovationAtCurrentState"):
                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
             #
-            Jb  = float( 0.5 * _dX.T * BI * _dX )
-            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
+            Jb  = float( 0.5 * _dX.T * (BI * _dX) )
+            Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
             J   = Jb + Jo
             #
             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -1939,11 +2306,10 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             return J
         #
         def GradientOfCostFunction(dx):
-            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
-            _HdX         = Ht * _dX
-            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
+            _dX          = numpy.ravel( dx )
+            _HdX         = (Ht @ _dX).reshape((-1,1))
             _dInnovation = Innovation - _HdX
-            GradJb       = BI * _dX
+            GradJb       = BI @ _dX
             GradJo       = - Ht.T @ (RI * _dInnovation)
             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
             return GradJ
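
The inner loop of this incremental formulation minimises a quadratic in the increment dx around the outer iterate, J(dx) = 1/2 dx^T BI dx + 1/2 (d - Ht dx)^T RI (d - Ht dx), with d the outer-loop innovation, and GradientOfCostFunction returns exactly its gradient. An illustrative finite-difference self-check of that gradient, with plain arrays standing in for the operator objects:

import numpy
BI = numpy.eye(2); RI = numpy.eye(2)
Ht = numpy.array([[1., 2.], [0., 1.]]); d = numpy.array([1., -1.])
def J(dx):
    return float( 0.5*dx @ BI @ dx + 0.5*(d - Ht @ dx) @ RI @ (d - Ht @ dx) )
def gradJ(dx):
    return BI @ dx - Ht.T @ (RI @ (d - Ht @ dx))
dx  = numpy.array([0.3, -0.7]); eps = 1e-6
fd  = numpy.array([ (J(dx + eps*e) - J(dx - eps*e)) / (2*eps) for e in numpy.eye(2) ])
assert numpy.allclose(fd, gradJ(dx), atol=1e-8)
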
@@ -1960,7 +2326,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
                 import scipy.optimize as optimiseur
             Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
                 func        = CostFunction,
-                x0          = numpy.zeros(Xini.size),
+                x0          = numpy.zeros(Xb.size),
                 fprime      = GradientOfCostFunction,
                 args        = (),
                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
@@ -1974,7 +2340,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         elif selfA._parameters["Minimizer"] == "TNC":
             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                 func        = CostFunction,
-                x0          = numpy.zeros(Xini.size),
+                x0          = numpy.zeros(Xb.size),
                 fprime      = GradientOfCostFunction,
                 args        = (),
                 bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
@@ -1986,7 +2352,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         elif selfA._parameters["Minimizer"] == "CG":
             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                 f           = CostFunction,
-                x0          = numpy.zeros(Xini.size),
+                x0          = numpy.zeros(Xb.size),
                 fprime      = GradientOfCostFunction,
                 args        = (),
                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
@@ -1997,7 +2363,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         elif selfA._parameters["Minimizer"] == "NCG":
             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                 f           = CostFunction,
-                x0          = numpy.zeros(Xini.size),
+                x0          = numpy.zeros(Xb.size),
                 fprime      = GradientOfCostFunction,
                 args        = (),
                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
@@ -2008,7 +2374,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         elif selfA._parameters["Minimizer"] == "BFGS":
             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                 f           = CostFunction,
-                x0          = numpy.zeros(Xini.size),
+                x0          = numpy.zeros(Xb.size),
                 fprime      = GradientOfCostFunction,
                 args        = (),
                 maxiter     = selfA._parameters["MaximumNumberOfSteps"],
@@ -2024,17 +2390,15 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         #
         if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
             Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-            Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
         else:
-            Minimum = Xb + numpy.asmatrix(numpy.ravel( Minimum )).T
+            Minimum = Xb + Minimum.reshape((-1,1))
         #
         Xr     = Minimum
         DeltaJ = selfA.StoredVariables["CostFunctionJ" ][-1] - J
         iOuter = selfA.StoredVariables["CurrentIterationNumber"][-1]
     #
-    # Obtention de l'analyse
-    # ----------------------
     Xa = Xr
+    #--------------------------
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -2049,8 +2413,6 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             HXa = Hm( Xa )
     #
-    # Calcul de la covariance d'analyse
-    # ---------------------------------
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles") or \
         selfA._toStore("JacobianMatrixAtOptimum") or \
@@ -2064,25 +2426,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles"):
-        HessienneI = []
-        nb = Xa.size
-        for i in range(nb):
-            _ee    = numpy.matrix(numpy.zeros(nb)).T
-            _ee[i] = 1.
-            _HtEE  = numpy.dot(HtM,_ee)
-            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
-            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
-        HessienneI = numpy.matrix( HessienneI )
-        A = HessienneI.I
-        if min(A.shape) != max(A.shape):
-            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-        if (numpy.diag(A) < 0).any():
-            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-        if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-            try:
-                L = numpy.linalg.cholesky( A )
-            except:
-                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
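
The removed column-by-column Hessian construction is factored into the shared HessienneEstimation helper. A hypothetical minimal equivalent based on the removed lines, assuming plain ndarray arguments (the actual helper, defined earlier in the file, keeps the square-shape and positive-definiteness checks as well):

import numpy
def HessienneEstimation(nb, HaM, HtM, BI, RI):
    "A posteriori covariance A = (B^-1 + H^T R^-1 H)^-1, built column by column"
    HessienneI = []
    for i in range(int(nb)):
        _ee    = numpy.zeros((nb, 1))
        _ee[i] = 1.
        _HtEE  = (HtM @ _ee).reshape((-1, 1))
        HessienneI.append( numpy.ravel( BI @ _ee + HaM @ (RI @ _HtEE) ) )
    A = numpy.linalg.inv(numpy.array( HessienneI ))
    if (numpy.diag(A) < 0).any():
        raise ValueError("The a posteriori covariance matrix A has at least one negative value on its diagonal.")
    return A
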
     if selfA._toStore("APosterioriCovariance"):
         selfA.StoredVariables["APosterioriCovariance"].store( A )
     if selfA._toStore("JacobianMatrixAtOptimum"):
@@ -2100,30 +2444,32 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._toStore("OMB"):
         d  = Y - HXb
     if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        selfA.StoredVariables["Innovation"].store( d )
     if selfA._toStore("BMA"):
         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
     if selfA._toStore("OMA"):
         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
     if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+        selfA.StoredVariables["OMB"].store( d )
     if selfA._toStore("SigmaObs2"):
         TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
     if selfA._toStore("MahalanobisConsistency"):
         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
     if selfA._toStore("SimulationQuantiles"):
         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
     if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
     if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
 
 # ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
-    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+    Hybrid=None,
+    ):
     """
     Maximum Likelihood Ensemble Filter
     """
@@ -2161,6 +2507,8 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
     #
     __n = Xb.size
     __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
@@ -2174,8 +2522,6 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
     elif selfA._parameters["nextStep"]:
         Xn = selfA._getInternalState("Xn")
     #
-    previousJMinimum = numpy.finfo(float).max
-    #
     for step in range(duration-1):
         numpy.random.set_state(selfA._getInternalState("seed"))
         if hasattr(Y,"store"):
@@ -2185,11 +2531,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -2206,7 +2552,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -2264,7 +2610,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -2278,7 +2628,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -2310,8 +2660,8 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -2344,6 +2694,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Additional final storage of the optimum in parameter estimation
     # ----------------------------------------------------------------
@@ -2403,7 +2756,7 @@ def mmqr(
         iteration += 1
         #
         Derivees  = numpy.array(fprime(variables))
-        Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
+        Derivees  = Derivees.reshape(n,p) # ADAO & check shape
         DeriveesT = Derivees.transpose()
         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
@@ -2442,8 +2795,13 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
     """
     #
     # Initialisation
+    # --------------
     if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedTo
+        M = EM["Direct"].appliedControledFormTo
+        if CM is not None and "Tangent" in CM and U is not None:
+            Cm = CM["Tangent"].asMatrix(Xb)
+        else:
+            Cm = None
         #
         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
             Xn = numpy.ravel(Xb).reshape((-1,1))
@@ -2472,16 +2830,29 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
         else:
             Ynpu = numpy.ravel( Y ).reshape((-1,1))
         #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
         if selfA._parameters["EstimationOf"] == "State": # Forecast
-            Xn_predicted = M( Xn )
+            Xn_predicted = M( (Xn, Un) )
             if selfA._toStore("ForecastState"):
                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+            if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
             # --- > By principle, M = Id, Q = 0
             Xn_predicted = Xn
         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
         #
-        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+        oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
         #
         Xn = selfA.StoredVariables["Analysis"][-1]
         #--------------------------
@@ -2497,16 +2868,13 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     # Initialisations
     # ---------------
-    #
-    # Opérateurs
     Hm = HO["Direct"].appliedTo
     #
-    # Utilisation éventuelle d'un vecteur H(Xb) précalculé
     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
     else:
-        HXb = Hm( Xb )
-    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = numpy.ravel( HXb ).reshape((-1,1))
     if Y.size != HXb.size:
         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
     if max(Y.shape) != max(HXb.shape):
@@ -2522,25 +2890,24 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     HBHTpR = R + Ht * BHT
     Innovation = Y - HXb
     #
-    # Point de démarrage de l'optimisation
-    Xini = numpy.zeros(Xb.shape)
+    Xini = numpy.zeros(Y.size)
     #
     # Definition of the cost function
     # -------------------------------
     def CostFunction(w):
-        _W = numpy.asmatrix(numpy.ravel( w )).T
+        _W = numpy.asarray(w).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
-            selfA.StoredVariables["CurrentState"].store( Xb + BHT * _W )
+            selfA.StoredVariables["CurrentState"].store( Xb + BHT @ _W )
         if selfA._toStore("SimulatedObservationAtCurrentState") or \
             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT * _W ) )
+            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm( Xb + BHT @ _W ) )
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( Innovation )
         #
-        Jb  = float( 0.5 * _W.T * HBHTpR * _W )
-        Jo  = float( - _W.T * Innovation )
+        Jb  = float( 0.5 * _W.T @ (HBHTpR @ _W) )
+        Jo  = float( - _W.T @ Innovation )
         J   = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -2569,8 +2936,8 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(w):
-        _W = numpy.asmatrix(numpy.ravel( w )).T
-        GradJb  = HBHTpR * _W
+        _W = numpy.asarray(w).reshape((-1,1))
+        GradJb  = HBHTpR @ _W
         GradJo  = - Innovation
         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
         return GradJ
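
PSAS works in the dual (observation) space: with x = Xb + B H^T w, the cost is J(w) = 1/2 w^T (H B H^T + R) w - w^T d, which is why the start point Xini now has the size of Y and why the state-space bounds are dropped from the minimiser calls below. For a linear H the dual optimum reproduces the primal 3DVAR analysis, as this illustrative check shows:

import numpy
B = numpy.array([[2., 0.], [0., 1.]]); R = numpy.eye(2); H = numpy.eye(2)
Xb = numpy.zeros((2, 1)); Y = numpy.array([[1.], [2.]])
d  = Y - H @ Xb
w  = numpy.linalg.solve(H @ B @ H.T + R, d)     # dual-space optimum
Xa_dual   = Xb + B @ H.T @ w
Xa_primal = Xb + numpy.linalg.solve(
    numpy.linalg.inv(B) + H.T @ numpy.linalg.inv(R) @ H,
    H.T @ numpy.linalg.inv(R) @ d)
assert numpy.allclose(Xa_dual, Xa_primal)
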
@@ -2589,7 +2956,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             x0          = Xini,
             fprime      = GradientOfCostFunction,
             args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
             maxfun      = selfA._parameters["MaximumNumberOfSteps"]-1,
             factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
@@ -2603,7 +2969,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             x0          = Xini,
             fprime      = GradientOfCostFunction,
             args        = (),
-            bounds      = RecentredBounds(selfA._parameters["Bounds"], Xb),
             maxfun      = selfA._parameters["MaximumNumberOfSteps"],
             pgtol       = selfA._parameters["ProjectedGradientTolerance"],
             ftol        = selfA._parameters["CostDecrementTolerance"],
@@ -2652,13 +3017,11 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     # ----------------------------------------------------------------
     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
     else:
-        Minimum = Xb + BHT * numpy.asmatrix(numpy.ravel( Minimum )).T
+        Minimum = Xb + BHT @ Minimum.reshape((-1,1))
     #
-    # Obtention de l'analyse
-    # ----------------------
     Xa = Minimum
+    #--------------------------
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -2673,8 +3036,6 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             HXa = Hm( Xa )
     #
-    # Calcul de la covariance d'analyse
-    # ---------------------------------
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles") or \
         selfA._toStore("JacobianMatrixAtOptimum") or \
@@ -2690,25 +3051,7 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._toStore("SimulationQuantiles"):
         BI = B.getI()
         RI = R.getI()
-        HessienneI = []
-        nb = Xa.size
-        for i in range(nb):
-            _ee    = numpy.matrix(numpy.zeros(nb)).T
-            _ee[i] = 1.
-            _HtEE  = numpy.dot(HtM,_ee)
-            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
-            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
-        HessienneI = numpy.matrix( HessienneI )
-        A = HessienneI.I
-        if min(A.shape) != max(A.shape):
-            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-        if (numpy.diag(A) < 0).any():
-            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-        if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-            try:
-                L = numpy.linalg.cholesky( A )
-            except:
-                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
     if selfA._toStore("APosterioriCovariance"):
         selfA.StoredVariables["APosterioriCovariance"].store( A )
     if selfA._toStore("JacobianMatrixAtOptimum"):
@@ -2726,29 +3069,32 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._toStore("OMB"):
         d  = Y - HXb
     if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        selfA.StoredVariables["Innovation"].store( d )
     if selfA._toStore("BMA"):
         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
     if selfA._toStore("OMA"):
         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
     if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+        selfA.StoredVariables["OMB"].store( d )
     if selfA._toStore("SigmaObs2"):
         TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
     if selfA._toStore("MahalanobisConsistency"):
         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
     if selfA._toStore("SimulationQuantiles"):
         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
     if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
     if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
 
 # ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula16",
+    Hybrid=None,
+    ):
     """
     Stochastic EnKF
     """
@@ -2786,24 +3132,23 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
     #
     __n = Xb.size
     __m = selfA._parameters["NumberOfMembers"]
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
+    previousJMinimum = numpy.finfo(float).max
     #
     if hasattr(R,"asfullmatrix"): Rn = R.asfullmatrix(__p)
     else:                         Rn = R
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
         selfA.StoredVariables["Analysis"].store( Xb )
         if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
         selfA._setInternalState("seed", numpy.random.get_state())
     elif selfA._parameters["nextStep"]:
         Xn = selfA._getInternalState("Xn")
     #
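
With this change the initial ensemble is drawn around Xb using the full background covariance Pn rather than default perturbations. A hypothetical minimal sketch of what EnsembleOfBackgroundPerturbations does when given an explicit covariance (the real helper, defined earlier in NumericObjects.py, also handles the Pn=None case and the random seed):

import numpy
def ensemble_of_background_perturbations(Xb, Pn, m):
    "m members drawn as Xb + N(0, Pn), one member per column"
    Xb = numpy.ravel(Xb)
    perturbations = numpy.random.multivariate_normal(numpy.zeros(Xb.size), Pn, size=m)
    return (Xb + perturbations).T
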
-    previousJMinimum = numpy.finfo(float).max
-    #
     for step in range(duration-1):
         numpy.random.set_state(selfA._getInternalState("seed"))
         if hasattr(Y,"store"):
@@ -2813,11 +3158,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -2837,7 +3182,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 returnSerieAsArrayMatrix = True )
             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > By principle, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -2846,8 +3191,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
         #
         #--------------------------
         if VariantM == "KalmanFilterFormula05":
@@ -2887,7 +3232,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -2901,7 +3250,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -2933,8 +3282,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -2967,6 +3316,9 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> For the smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Additional final storage of the optimum in parameter estimation
     # ----------------------------------------------------------------
@@ -2988,17 +3340,14 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     # Initialisations
     # ---------------
-    #
-    # Opérateurs
     Hm = HO["Direct"].appliedTo
     Ha = HO["Adjoint"].appliedInXTo
     #
-    # Utilisation éventuelle d'un vecteur H(Xb) précalculé
     if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
-        HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
     else:
-        HXb = Hm( Xb )
-    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+        HXb = numpy.asarray(Hm( Xb ))
+    HXb = HXb.reshape((-1,1))
     if Y.size != HXb.size:
         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
     if max(Y.shape) != max(HXb.shape):
@@ -3009,23 +3358,20 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
         selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
     #
-    # Précalcul des inversions de B et R
     BI = B.getI()
     RI = R.getI()
     #
-    # Point de démarrage de l'optimisation
     Xini = selfA._parameters["InitializationPoint"]
     #
     # Definition of the cost function
     # -------------------------------
     def CostFunction(x):
-        _X  = numpy.asmatrix(numpy.ravel( x )).T
+        _X  = numpy.asarray(x).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = Hm( _X )
-        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
         _Innovation = Y - _HX
         if selfA._toStore("SimulatedObservationAtCurrentState") or \
             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -3033,8 +3379,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
         #
-        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
-        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
         J   = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -3063,9 +3409,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(x):
-        _X      = numpy.asmatrix(numpy.ravel( x )).T
-        _HX     = Hm( _X )
-        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
+        _X      = numpy.asarray(x).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
         GradJb  = BI * (_X - Xb)
         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
@@ -3149,9 +3494,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
     #
-    # Obtention de l'analyse
-    # ----------------------
-    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+    Xa = Minimum
+    #--------------------------
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -3166,8 +3510,6 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             HXa = Hm( Xa )
     #
-    # Calcul de la covariance d'analyse
-    # ---------------------------------
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles") or \
         selfA._toStore("JacobianMatrixAtOptimum") or \
@@ -3181,25 +3523,7 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HaM = HaM.reshape(Xa.size,Y.size) # ADAO & check shape
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles"):
-        HessienneI = []
-        nb = Xa.size
-        for i in range(nb):
-            _ee    = numpy.matrix(numpy.zeros(nb)).T
-            _ee[i] = 1.
-            _HtEE  = numpy.dot(HtM,_ee)
-            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
-            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
-        HessienneI = numpy.matrix( HessienneI )
-        A = HessienneI.I
-        if min(A.shape) != max(A.shape):
-            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-        if (numpy.diag(A) < 0).any():
-            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-        if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-            try:
-                L = numpy.linalg.cholesky( A )
-            except:
-                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
     if selfA._toStore("APosterioriCovariance"):
         selfA.StoredVariables["APosterioriCovariance"].store( A )
     if selfA._toStore("JacobianMatrixAtOptimum"):
@@ -3217,24 +3541,24 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._toStore("OMB"):
         d  = Y - HXb
     if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        selfA.StoredVariables["Innovation"].store( d )
     if selfA._toStore("BMA"):
         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
     if selfA._toStore("OMA"):
         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
     if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+        selfA.StoredVariables["OMB"].store( d )
     if selfA._toStore("SigmaObs2"):
         TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
     if selfA._toStore("MahalanobisConsistency"):
         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
     if selfA._toStore("SimulationQuantiles"):
         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
     if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
     if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
 
@@ -3259,18 +3583,18 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     def Un(_step):
         if U is not None:
             if hasattr(U,"store") and 1<=_step<len(U) :
-                _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                _Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                _Un = numpy.asmatrix(numpy.ravel( U )).T
+                _Un = numpy.ravel( U ).reshape((-1,1))
         else:
             _Un = None
         return _Un
     def CmUn(_xn,_un):
         if Cm is not None and _un is not None: # Warning: if Cm is also included in M, it is counted twice!
             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
-            _CmUn = _Cm * _un
+            _CmUn = (_Cm @ _un).reshape((-1,1))
         else:
             _CmUn = 0.
         return _CmUn
@@ -3298,46 +3622,44 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     selfA.DirectCalculation = [None,] # Step 0 is not observed
     selfA.DirectInnovation  = [None,] # Step 0 is not observed
     def CostFunction(x):
-        _X  = numpy.asmatrix(numpy.ravel( x )).T
+        _X  = numpy.asarray(x).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
-        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
         selfA.DirectCalculation = [None,]
         selfA.DirectInnovation  = [None,]
         Jo  = 0.
         _Xn = _X
         for step in range(0,duration-1):
             if hasattr(Y,"store"):
-                _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
             else:
-                _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
             _Un = Un(step)
             #
             # Evolution step
             if selfA._parameters["EstimationOf"] == "State":
-                _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 pass
             #
             if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                __Bounds = ForceNumericBounds( selfA._parameters["Bounds"] )
-                _Xn = numpy.max(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,0])),axis=1)
-                _Xn = numpy.min(numpy.hstack((_Xn,numpy.asmatrix(__Bounds)[:,1])),axis=1)
+                _Xn = ApplyBounds( _Xn, ForceNumericBounds(selfA._parameters["Bounds"]) )
             #
             # Observation discrepancy step
             if selfA._parameters["EstimationOf"] == "State":
-                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
             elif selfA._parameters["EstimationOf"] == "Parameters":
-                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
             #
             # Storing the state
             selfA.DirectCalculation.append( _Xn )
             selfA.DirectInnovation.append( _YmHMX )
             #
             # Adding to the observation functional
-            Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
         J = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -3363,7 +3685,7 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(x):
-        _X      = numpy.asmatrix(numpy.ravel( x )).T
+        _X      = numpy.asarray(x).reshape((-1,1))
         GradJb  = BI * (_X - Xb)
         GradJo  = 0.
         for step in range(duration-1,0,-1):
@@ -3377,8 +3699,8 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Ma = EM["Adjoint"].asMatrix(ValueForMethodForm = _Xn)
             Ma = Ma.reshape(_Xn.size,_Xn.size) # ADAO & check shape
             # Gradient computation using the adjoint state
-            GradJo = GradJo + Ha * RI * _YmHMX # Équivaut pour Ha linéaire à : Ha( (_Xn, RI * _YmHMX) )
-            GradJo = Ma * GradJo               # Équivaut pour Ma linéaire à : Ma( (_Xn, GradJo) )
+            GradJo = GradJo + Ha * (RI * _YmHMX) # For a linear Ha, equivalent to: Ha( (_Xn, RI * _YmHMX) )
+            GradJo = Ma * GradJo                 # For a linear Ma, equivalent to: Ma( (_Xn, GradJo) )
         GradJ = numpy.ravel( GradJb ) - numpy.ravel( GradJo )
         return GradJ
     #
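
The backward loop of GradientOfCostFunction accumulates the observation part of the gradient with the adjoint recursion g <- Ma (g + Ha RI d_k), run from the last observed step to the first (Ha and Ma are already the adjoint operators, hence the left products), and GradJ subtracts the result. A scalar, self-contained sanity check of that recursion against a finite difference (illustrative):

# One evolution step x1 = M*x0 and one observation y of x1:
M, H, Rinv, y = 2.0, 1.0, 1.0, 1.0
def Jo(x0):
    d = y - H * (M * x0)
    return 0.5 * d * Rinv * d
x0  = 0.3
d   = y - H * (M * x0)
g   = M * (0.0 + H * Rinv * d)    # adjoint sweep, innermost step first
eps = 1e-7
fd  = (Jo(x0 + eps) - Jo(x0 - eps)) / (2 * eps)
assert abs(fd + g) < 1e-6         # the code uses GradJb - GradJo, so dJo/dx0 = -g
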
@@ -3462,7 +3784,7 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     # Obtaining the analysis
     # ----------------------
-    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+    Xa = Minimum
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -3514,6 +3836,7 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         RI = R.getI()
     #
     __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = Xb
@@ -3542,33 +3865,33 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = Mt * Xn
+            Xn_predicted = Mt @ Xn
             if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
-            Pn_predicted = Q + Mt * Pn * Ma
+                Xn_predicted = Xn_predicted + Cm @ Un
+            Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > By principle, M = Id, Q = 0
             Xn_predicted = Xn
             Pn_predicted = Pn
         #
         if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = Ht * Xn_predicted
+            HX_predicted = Ht @ Xn_predicted
             _Innovation  = Ynpu - HX_predicted
         elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = Ht * Xn_predicted
+            HX_predicted = Ht @ Xn_predicted
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
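
These two lines are the textbook Kalman analysis, Kn = P H^T (H P H^T + R)^-1 applied to the innovation, with Ha playing the role of Ht^T. A compact restatement assuming plain ndarrays (illustrative, not the module's own helper):

import numpy
def kalman_update(x_f, P_f, y, Ht, R):
    "One linear Kalman analysis step: returns analysed state and covariance"
    S   = R + Ht @ P_f @ Ht.T                  # innovation covariance
    K   = P_f @ Ht.T @ numpy.linalg.inv(S)     # Kalman gain
    x_a = x_f + K @ (y - Ht @ x_f)
    P_a = P_f - K @ Ht @ P_f
    return x_a, P_a
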
@@ -3609,8 +3932,8 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -3657,13 +3980,12 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     return 0
 
 # ==============================================================================
-def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     """
     Unscented Kalman Filter
     """
     if selfA._parameters["EstimationOf"] == "Parameters":
         selfA._parameters["StoreInternalVariables"] = True
-    selfA._parameters["Bounds"] = ForceNumericBounds( selfA._parameters["Bounds"] )
     #
     L     = Xb.size
     Alpha = selfA._parameters["Alpha"]
@@ -3718,6 +4040,7 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         RI = R.getI()
     #
     __n = Xb.size
+    nbPreviousSteps  = len(selfA.StoredVariables["Analysis"])
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
         Xn = Xb
@@ -3753,84 +4076,60 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             Un = None
         #
-        Pndemi = numpy.linalg.cholesky(Pn)
+        Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))
         Xnp = numpy.hstack([Xn, Xn+Gamma*Pndemi, Xn-Gamma*Pndemi])
         nbSpts = 2*Xn.size+1
         #
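
Replacing numpy.linalg.cholesky by the real part of scipy.linalg.sqrtm swaps the triangular factor for the symmetric square root, which also exists when Pn is merely positive semi-definite, a case where Cholesky raises. Both are valid square roots of a symmetric positive-definite Pn (illustrative):

import numpy, scipy.linalg
Pn = numpy.array([[4., 1.], [1., 3.]])
Pndemi = numpy.real(scipy.linalg.sqrtm(Pn))   # symmetric square root
L      = numpy.linalg.cholesky(Pn)            # lower-triangular factor
assert numpy.allclose(Pndemi @ Pndemi, Pn)
assert numpy.allclose(L @ L.T, Pn)
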
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnp[:,point] = numpy.max(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-                Xnp[:,point] = numpy.min(numpy.hstack((Xnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
-        #
         XEtnnp = []
         for point in range(nbSpts):
             if selfA._parameters["EstimationOf"] == "State":
-                XEtnnpi = numpy.asmatrix(numpy.ravel( Mm( (Xnp[:,point], Un) ) )).T
+                XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                 if Cm is not None and Un is not None: # Warning: if Cm is also included in M, it is counted twice!
                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm * Un
-                if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-                    XEtnnpi = numpy.max(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-                    XEtnnpi = numpy.min(numpy.hstack((XEtnnpi,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+                    XEtnnpi = XEtnnpi + Cm @ Un
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 # --- > By principle, M = Id, Q = 0
                 XEtnnpi = Xnp[:,point]
-            XEtnnp.append( XEtnnpi )
-        XEtnnp = numpy.hstack( XEtnnp )
+            XEtnnp.append( numpy.ravel(XEtnnpi).reshape((-1,1)) )
+        XEtnnp = numpy.concatenate( XEtnnp, axis=1 )
         #
-        Xncm = numpy.matrix( XEtnnp.getA()*numpy.array(Wm) ).sum(axis=1)
-        #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xncm = numpy.max(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-            Xncm = numpy.min(numpy.hstack((Xncm,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+        Xncm = ( XEtnnp * Wm ).sum(axis=1)
         #
         if selfA._parameters["EstimationOf"] == "State":        Pnm = Q
         elif selfA._parameters["EstimationOf"] == "Parameters": Pnm = 0.
         for point in range(nbSpts):
-            Pnm += Wc[i] * (XEtnnp[:,point]-Xncm) * (XEtnnp[:,point]-Xncm).T
-        #
-        if selfA._parameters["EstimationOf"] == "Parameters" and selfA._parameters["Bounds"] is not None:
-            Pnmdemi = selfA._parameters["Reconditioner"] * numpy.linalg.cholesky(Pnm)
-        else:
-            Pnmdemi = numpy.linalg.cholesky(Pnm)
+            Pnm += Wc[point] * ((XEtnnp[:,point]-Xncm).reshape((-1,1)) * (XEtnnp[:,point]-Xncm))
         #
-        Xnnp = numpy.hstack([Xncm, Xncm+Gamma*Pnmdemi, Xncm-Gamma*Pnmdemi])
+        Pnmdemi = numpy.real(scipy.linalg.sqrtm(Pnm))
         #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            for point in range(nbSpts):
-                Xnnp[:,point] = numpy.max(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-                Xnnp[:,point] = numpy.min(numpy.hstack((Xnnp[:,point],numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
+        Xnnp = numpy.hstack([Xncm.reshape((-1,1)), Xncm.reshape((-1,1))+Gamma*Pnmdemi, Xncm.reshape((-1,1))-Gamma*Pnmdemi])
         #
         Ynnp = []
         for point in range(nbSpts):
             if selfA._parameters["EstimationOf"] == "State":
-                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], None) ) )).T
+                Ynnpi = Hm( (Xnnp[:,point], None) )
             elif selfA._parameters["EstimationOf"] == "Parameters":
-                Ynnpi = numpy.asmatrix(numpy.ravel( Hm( (Xnnp[:,point], Un) ) )).T
-            Ynnp.append( Ynnpi )
-        Ynnp = numpy.hstack( Ynnp )
+                Ynnpi = Hm( (Xnnp[:,point], Un) )
+            Ynnp.append( numpy.ravel(Ynnpi).reshape((-1,1)) )
+        Ynnp = numpy.concatenate( Ynnp, axis=1 )
         #
-        Yncm = numpy.matrix( Ynnp.getA()*numpy.array(Wm) ).sum(axis=1)
+        Yncm = ( Ynnp * Wm ).sum(axis=1)
         #
         Pyyn = R
         Pxyn = 0.
         for point in range(nbSpts):
-            Pyyn += Wc[i] * (Ynnp[:,point]-Yncm) * (Ynnp[:,point]-Yncm).T
-            Pxyn += Wc[i] * (Xnnp[:,point]-Xncm) * (Ynnp[:,point]-Yncm).T
+            Pyyn += Wc[point] * ((Ynnp[:,point]-Yncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
+            Pxyn += Wc[point] * ((Xnnp[:,point]-Xncm).reshape((-1,1)) * (Ynnp[:,point]-Yncm))
         #
-        _Innovation  = Ynpu - Yncm
+        _Innovation  = Ynpu - Yncm.reshape((-1,1))
         if selfA._parameters["EstimationOf"] == "Parameters":
             if Cm is not None and Un is not None: # Warning: if Cm is also included in H, it is counted twice!
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pxyn * Pyyn.I
-        Xn = Xncm + Kn * _Innovation
+        Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
         Pn = Pnm - Kn * Pyyn * Kn.T
         #
-        if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
-            Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,0])),axis=1)
-            Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(selfA._parameters["Bounds"])[:,1])),axis=1)
-        #
         Xa = Xn # Pointers
         #--------------------------
         selfA._setInternalState("Xn", Xn)
@@ -3866,8 +4165,8 @@ def uckf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -3921,29 +4220,24 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     # Initialisations
     # ---------------
-    #
-    # Opérateurs
     Hm = HO["Direct"].appliedTo
     Ha = HO["Adjoint"].appliedInXTo
     #
-    # Précalcul des inversions de B et R
     BT = B.getT()
     RI = R.getI()
     #
-    # Point de démarrage de l'optimisation
-    Xini = numpy.zeros(Xb.shape)
+    Xini = numpy.zeros(Xb.size)
     #
     # Cost function definition
     # ------------------------
     def CostFunction(v):
-        _V = numpy.asmatrix(numpy.ravel( v )).T
-        _X = Xb + B * _V
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = Hm( _X )
-        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
         _Innovation = Y - _HX
         if selfA._toStore("SimulatedObservationAtCurrentState") or \
             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -3951,8 +4245,8 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
         #
-        Jb  = float( 0.5 * _V.T * BT * _V )
-        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+        Jb  = float( 0.5 * _V.T * (BT * _V) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
         J   = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -3981,10 +4275,9 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(v):
-        _V = numpy.asmatrix(numpy.ravel( v )).T
-        _X = Xb + B * _V
-        _HX     = Hm( _X )
-        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
+        _V = numpy.asarray(v).reshape((-1,1))
+        _X = Xb + (B @ _V).reshape((-1,1))
+        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
         GradJb  = BT * _V
         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
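
Stripped of the storage bookkeeping, the CostFunction/GradientOfCostFunction pair above reduces to the sketch below, assuming plain ndarrays for B and RI (in the file they may be Covariance objects) and the same change of variable x = Xb + B v; Hm and Ha keep the call conventions used in the hunk:

    import numpy

    def make_cost_and_gradient(Xb, Y, Hm, Ha, B, RI):
        # The control variable v is preconditioned by the background-error
        # covariance: the state is evaluated as x = Xb + B v.
        BT = B.T
        def cost(v):
            _V = numpy.asarray(v).reshape((-1,1))
            _X = Xb.reshape((-1,1)) + B @ _V
            _Innovation = Y.reshape((-1,1)) - numpy.asarray(Hm(_X)).reshape((-1,1))
            Jb = float( 0.5 * _V.T @ (BT @ _V) )
            Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
            return Jb + Jo
        def gradient(v):
            _V  = numpy.asarray(v).reshape((-1,1))
            _X  = Xb.reshape((-1,1)) + B @ _V
            _HX = numpy.asarray(Hm(_X)).reshape((-1,1))
            GradJb = BT @ _V
            GradJo = - Ha( (_X, RI @ (Y.reshape((-1,1)) - _HX)) )
            return numpy.ravel(GradJb) + numpy.ravel(GradJo)
        return cost, gradient
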
@@ -4067,13 +4360,11 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     # ----------------------------------------------------------------
     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
-        Minimum = numpy.asmatrix(numpy.ravel( Minimum )).T
     else:
-        Minimum = Xb + B * numpy.asmatrix(numpy.ravel( Minimum )).T
+        Minimum = Xb + B * Minimum.reshape((-1,1)) # Deliberately not @
     #
-    # Obtention de l'analyse
-    # ----------------------
     Xa = Minimum
+    #--------------------------
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -4088,8 +4379,6 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             HXa = Hm( Xa )
     #
-    # Calcul de la covariance d'analyse
-    # ---------------------------------
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles") or \
         selfA._toStore("JacobianMatrixAtOptimum") or \
@@ -4104,25 +4393,7 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles"):
         BI = B.getI()
-        HessienneI = []
-        nb = Xa.size
-        for i in range(nb):
-            _ee    = numpy.matrix(numpy.zeros(nb)).T
-            _ee[i] = 1.
-            _HtEE  = numpy.dot(HtM,_ee)
-            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
-            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
-        HessienneI = numpy.matrix( HessienneI )
-        A = HessienneI.I
-        if min(A.shape) != max(A.shape):
-            raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
-        if (numpy.diag(A) < 0).any():
-            raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(selfA._name,))
-        if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
-            try:
-                L = numpy.linalg.cholesky( A )
-            except:
-                raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(selfA._name,))
+        A = HessienneEstimation(Xa.size, HaM, HtM, BI, RI)
     if selfA._toStore("APosterioriCovariance"):
         selfA.StoredVariables["APosterioriCovariance"].store( A )
     if selfA._toStore("JacobianMatrixAtOptimum"):
@@ -4140,24 +4411,24 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         selfA._toStore("OMB"):
         d  = Y - HXb
     if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        selfA.StoredVariables["Innovation"].store( d )
     if selfA._toStore("BMA"):
         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
     if selfA._toStore("OMA"):
         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
     if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+        selfA.StoredVariables["OMB"].store( d )
     if selfA._toStore("SigmaObs2"):
         TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
     if selfA._toStore("MahalanobisConsistency"):
         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
     if selfA._toStore("SimulationQuantiles"):
         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
     if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
     if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
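
The SigmaObs2 line above computes what appears to be the classical Desroziers-type consistency ratio (y - Hxb)^T (y - Hxa) / Tr(R), expected to stay close to 1 when R is well specified. A standalone restatement under that reading, with the same names:

    import numpy

    def sigma_obs2(d, Y, HXa, TraceR):
        # d = y - Hxb ; the ratio drifts away from 1. when the observation
        # error covariance R is inconsistent with the actual innovations.
        return float( numpy.ravel(d) @ (numpy.ravel(Y) - numpy.ravel(HXa)) ) / TraceR
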