Minor internal modifications, documentation and bounds corrections
author     Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
           Mon, 30 Aug 2021 06:05:40 +0000 (08:05 +0200)
committer  Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
           Mon, 30 Aug 2021 06:05:40 +0000 (08:05 +0200)
README.txt
doc/en/ref_algorithm_UnscentedKalmanFilter.rst
doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
src/daComposant/daAlgorithms/QuantileRegression.py
src/daComposant/daCore/NumericObjects.py
test/test6901/Verification_des_Assimilation_Algorithms.py

diff --git a/README.txt b/README.txt
index f0915ed0cbb2155d1681c1d71dd7af646f1c98de..ed0e7cfa5a7100af928ad3ac5afd83a090ca1083 100644
@@ -37,13 +37,13 @@ commands is the following one::
     from numpy import array, matrix
     from adao import adaoBuilder
     case = adaoBuilder.New()
-    case.set( 'AlgorithmParameters', Algorithm='3DVAR' )
-    case.set( 'Background',          Vector=[0, 1, 2] )
-    case.set( 'BackgroundError',     ScalarSparseMatrix=1.0 )
-    case.set( 'Observation',         Vector=array([0.5, 1.5, 2.5]) )
-    case.set( 'ObservationError',    DiagonalSparseMatrix='1 1 1' )
-    case.set( 'ObservationOperator', Matrix='1 0 0;0 2 0;0 0 3' )
-    case.set( 'Observer',            Variable="Analysis", Template="ValuePrinter" )
+    case.set( 'AlgorithmParameters', Algorithm = '3DVAR' )
+    case.set( 'Background',          Vector = [0, 1, 2] )
+    case.set( 'BackgroundError',     ScalarSparseMatrix = 1.0 )
+    case.set( 'Observation',         Vector = array([0.5, 1.5, 2.5]) )
+    case.set( 'ObservationError',    DiagonalSparseMatrix = '1 1 1' )
+    case.set( 'ObservationOperator', Matrix = '1 0 0;0 2 0;0 0 3' )
+    case.set( 'Observer',            Variable = "Analysis", Template = "ValuePrinter" )
     case.execute()
 
 The result of running these commands in SALOME (either as a SALOME
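For reference, once case.execute() has returned, the optimal state can also be
retrieved directly in Python through the case itself (a minimal sketch, assuming
the case defined above has been executed)::

    # The "Analysis" variable is stored at each execution; take the last one
    Xa = case.get("Analysis")[-1]
    print("Optimal state:", Xa)
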
diff --git a/doc/en/ref_algorithm_UnscentedKalmanFilter.rst b/doc/en/ref_algorithm_UnscentedKalmanFilter.rst
index eeef062c8578b1695892e53d164892d04813d3c2..828843cdc3d4d9b736bff912dffa483961f7439f 100644
@@ -96,11 +96,22 @@ StoreSupplementaryCalculations
   "APosterioriVariances",
   "BMA",
   "CostFunctionJ",
+  "CostFunctionJAtCurrentOptimum",
   "CostFunctionJb",
+  "CostFunctionJbAtCurrentOptimum",
   "CostFunctionJo",
+  "CostFunctionJoAtCurrentOptimum",
   "CurrentIterationNumber",
+  "CurrentOptimum",
   "CurrentState",
+  "ForecastCovariance",
+  "ForecastState",
+  "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
+  "SimulatedObservationAtCurrentAnalysis",
+  "SimulatedObservationAtCurrentOptimum",
+  "SimulatedObservationAtCurrentState",
   ].
 
   Example :
@@ -128,16 +139,38 @@ StoreSupplementaryCalculations
 
 .. include:: snippets/CostFunctionJ.rst
 
+.. include:: snippets/CostFunctionJAtCurrentOptimum.rst
+
 .. include:: snippets/CostFunctionJb.rst
 
+.. include:: snippets/CostFunctionJbAtCurrentOptimum.rst
+
 .. include:: snippets/CostFunctionJo.rst
 
+.. include:: snippets/CostFunctionJoAtCurrentOptimum.rst
+
 .. include:: snippets/CurrentIterationNumber.rst
 
+.. include:: snippets/CurrentOptimum.rst
+
 .. include:: snippets/CurrentState.rst
 
+.. include:: snippets/ForecastCovariance.rst
+
+.. include:: snippets/ForecastState.rst
+
+.. include:: snippets/IndexOfOptimum.rst
+
+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst
 
+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
+.. include:: snippets/SimulatedObservationAtCurrentOptimum.rst
+
+.. include:: snippets/SimulatedObservationAtCurrentState.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo06.rst
 
diff --git a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst
index e873868d8a3ce1083b3393beea410e2334b05b0f..25338d7e137abc6fafe766ad2f7d54da84a4c949 100644
@@ -98,11 +98,22 @@ StoreSupplementaryCalculations
   "APosterioriVariances",
   "BMA",
   "CostFunctionJ",
+  "CostFunctionJAtCurrentOptimum",
   "CostFunctionJb",
+  "CostFunctionJbAtCurrentOptimum",
   "CostFunctionJo",
+  "CostFunctionJoAtCurrentOptimum",
   "CurrentIterationNumber",
+  "CurrentOptimum",
   "CurrentState",
+  "ForecastCovariance",
+  "ForecastState",
+  "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
+  "SimulatedObservationAtCurrentAnalysis",
+  "SimulatedObservationAtCurrentOptimum",
+  "SimulatedObservationAtCurrentState",
   ].
 
   Exemple :
@@ -130,16 +141,38 @@ StoreSupplementaryCalculations
 
 .. include:: snippets/CostFunctionJ.rst
 
+.. include:: snippets/CostFunctionJAtCurrentOptimum.rst
+
 .. include:: snippets/CostFunctionJb.rst
 
+.. include:: snippets/CostFunctionJbAtCurrentOptimum.rst
+
 .. include:: snippets/CostFunctionJo.rst
 
+.. include:: snippets/CostFunctionJoAtCurrentOptimum.rst
+
 .. include:: snippets/CurrentIterationNumber.rst
 
+.. include:: snippets/CurrentOptimum.rst
+
 .. include:: snippets/CurrentState.rst
 
+.. include:: snippets/ForecastCovariance.rst
+
+.. include:: snippets/ForecastState.rst
+
+.. include:: snippets/IndexOfOptimum.rst
+
+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst
 
+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
+.. include:: snippets/SimulatedObservationAtCurrentOptimum.rst
+
+.. include:: snippets/SimulatedObservationAtCurrentState.rst
+
 .. ------------------------------------ ..
 .. include:: snippets/Header2Algo06.rst
 
diff --git a/src/daComposant/daAlgorithms/QuantileRegression.py b/src/daComposant/daAlgorithms/QuantileRegression.py
index 37ea503c12e8fdd1dd2a61e0a86715b38bd46dc3..1d1cda905c158a20dedd9fd71b6b8af7e1e1248c 100644
@@ -103,6 +103,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
 
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
+        self._parameters["Bounds"] = NumericObjects.ForceNumericBounds( self._parameters["Bounds"] )
         #
         Hm = HO["Direct"].appliedTo
         #
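The ForceNumericBounds helper called here is defined in NumericObjects.py and
does not appear in this diff; a plausible minimal sketch of its contract,
assuming None bounds are mapped to the largest representable floats, is::

    import sys
    import numpy

    def ForceNumericBounds( __Bounds ):
        "Force the bounds to be numeric values, unless globally None"
        if __Bounds is None: return None
        # None entries become NaN on conversion, then +/- the float maximum
        __Bounds = numpy.asarray( __Bounds, dtype=float )
        if len(__Bounds.shape) != 2 or __Bounds.shape[1] != 2:
            raise ValueError("Incorrectly shaped bounds data")
        __Bounds[numpy.isnan(__Bounds[:,0]), 0] = -sys.float_info.max
        __Bounds[numpy.isnan(__Bounds[:,1]), 1] =  sys.float_info.max
        return __Bounds
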
diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py
index 2c862af4cb7ca74edb102471fc1fbd1cd362d0dd..c9193bf45ad27207b4fa6f5d8d78da3fad4d6c0d 100644
@@ -662,37 +662,36 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     else:
         LBounds = None
     if LBounds is not None:
-        LBounds = numpy.asmatrix(ForceNumericBounds( LBounds ))
+        LBounds = ForceNumericBounds( LBounds )
+    _Xa = numpy.ravel(Xa)
     #
     # Échantillonnage des états
     YfQ  = None
     EXr  = None
-    if selfA._parameters["SimulationForQuantiles"] == "Linear" and HXa is not None:
-        HXa  = numpy.matrix(numpy.ravel( HXa )).T
     for i in range(nbsamples):
-        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None:
-            dXr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A) - numpy.ravel(Xa)).T
+        if selfA._parameters["SimulationForQuantiles"] == "Linear" and HtM is not None and HXa is not None:
+            dXr = (numpy.random.multivariate_normal(_Xa,A) - _Xa).reshape((-1,1))
             if LBounds is not None: # "EstimateProjection" par défaut
-                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0]) - Xa),axis=1)
-                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1]) - Xa),axis=1)
-            dYr = numpy.matrix(numpy.ravel( HtM * dXr )).T
-            Yr = HXa + dYr
-            if selfA._toStore("SampledStateForQuantiles"): Xr = Xa + dXr
+                dXr = numpy.max(numpy.hstack((dXr,LBounds[:,0].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
+                dXr = numpy.min(numpy.hstack((dXr,LBounds[:,1].reshape((-1,1)) - _Xa.reshape((-1,1)))),axis=1).reshape((-1,1))
+            dYr = HtM @ dXr
+            Yr = HXa.reshape((-1,1)) + dYr
+            if selfA._toStore("SampledStateForQuantiles"): Xr = _Xa + numpy.ravel(dXr)
         elif selfA._parameters["SimulationForQuantiles"] == "NonLinear" and Hm is not None:
-            Xr = numpy.matrix(numpy.random.multivariate_normal(numpy.ravel(Xa),A)).T
+            Xr = numpy.random.multivariate_normal(_Xa,A)
             if LBounds is not None: # "EstimateProjection" par défaut
-                Xr = numpy.max(numpy.hstack((Xr,LBounds[:,0])),axis=1)
-                Xr = numpy.min(numpy.hstack((Xr,LBounds[:,1])),axis=1)
-            Yr = numpy.matrix(numpy.ravel( Hm( Xr ) )).T
+                Xr = numpy.max(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,0].reshape((-1,1)))),axis=1)
+                Xr = numpy.min(numpy.hstack((Xr.reshape((-1,1)),LBounds[:,1].reshape((-1,1)))),axis=1)
+            Yr = Hm( Xr )
         else:
             raise ValueError("Quantile simulations has only to be Linear or NonLinear.")
         #
         if YfQ is None:
-            YfQ = Yr
-            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.ravel(Xr)
+            YfQ = Yr.reshape((-1,1))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = Xr.reshape((-1,1))
         else:
-            YfQ = numpy.hstack((YfQ,Yr))
-            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.vstack((EXr,numpy.ravel(Xr)))
+            YfQ = numpy.hstack((YfQ,Yr.reshape((-1,1))))
+            if selfA._toStore("SampledStateForQuantiles"): EXr = numpy.hstack((EXr,Xr.reshape((-1,1))))
     #
     # Extraction des quantiles
     YfQ.sort(axis=-1)
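The sampling loop above draws states around the analysis and propagates them to
observation space, either with the tangent linear operator or with the full
nonlinear operator:

.. math::

    \mathbf{x}^{(i)} \sim \mathcal{N}(\mathbf{x}^a, \mathbf{A}),
    \qquad
    \mathbf{y}^{(i)} =
    \begin{cases}
        H(\mathbf{x}^a) + \mathbf{H}\,(\mathbf{x}^{(i)} - \mathbf{x}^a) & \text{(Linear)} \\
        H(\mathbf{x}^{(i)}) & \text{(NonLinear)}
    \end{cases}
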
@@ -700,11 +699,12 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None):
     for quantile in selfA._parameters["Quantiles"]:
         if not (0. <= float(quantile) <= 1.): continue
         indice = int(nbsamples * float(quantile) - 1./nbsamples)
-        if YQ is None: YQ = YfQ[:,indice]
-        else:          YQ = numpy.hstack((YQ,YfQ[:,indice]))
-    selfA.StoredVariables["SimulationQuantiles"].store( YQ )
+        if YQ is None: YQ = YfQ[:,indice].reshape((-1,1))
+        else:          YQ = numpy.hstack((YQ,YfQ[:,indice].reshape((-1,1))))
+    if YQ is not None: # Liste non vide de quantiles
+        selfA.StoredVariables["SimulationQuantiles"].store( YQ )
     if selfA._toStore("SampledStateForQuantiles"):
-        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr.T )
+        selfA.StoredVariables["SampledStateForQuantiles"].store( EXr )
     #
     return 0
 
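For reference, the hunk above keeps, for each requested quantile, the
corresponding column of the sorted sample matrix; with nbsamples = 100 and
quantile = 0.95, the selected index is int(100*0.95 - 1/100) = int(94.99) = 94.
A minimal standalone sketch of this empirical extraction::

    import numpy
    nbsamples = 100
    # 3 observed components, nbsamples realizations, sorted along the samples
    YfQ = numpy.sort(numpy.random.randn(3, nbsamples), axis=-1)
    indice = int(nbsamples * 0.95 - 1./nbsamples)   # = 94
    YQ = YfQ[:, indice].reshape((-1, 1))            # empirical 95% quantile
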
@@ -1844,6 +1844,9 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> Pour les smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Stockage final supplémentaire de l'optimum en estimation de paramètres
     # ----------------------------------------------------------------------
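The ensemble Xn stored here for smoother-type uses is an (n, m) array with one
column per member; a minimal sketch of how such a stored ensemble is typically
post-processed afterwards (shapes assumed)::

    import numpy
    Xn = numpy.random.randn(3, 10)           # n = 3 state variables, m = 10 members
    Xmean = Xn.mean(axis=1)                  # ensemble mean
    Xanom = Xn - Xmean.reshape((-1, 1))      # member anomalies around the mean
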
@@ -2344,6 +2347,9 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> Pour les smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Stockage final supplémentaire de l'optimum en estimation de paramètres
     # ----------------------------------------------------------------------
@@ -2791,13 +2797,12 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
     else:                         Rn = R
     #
     if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
-        Xn = EnsembleOfBackgroundPerturbations( Xb, None, __m )
+        if hasattr(B,"asfullmatrix"): Pn = B.asfullmatrix(__n)
+        else:                         Pn = B
+        Xn = EnsembleOfBackgroundPerturbations( Xb, Pn, __m )
         selfA.StoredVariables["Analysis"].store( Xb )
         if selfA._toStore("APosterioriCovariance"):
-            if hasattr(B,"asfullmatrix"):
-                selfA.StoredVariables["APosterioriCovariance"].store( B.asfullmatrix(__n) )
-            else:
-                selfA.StoredVariables["APosterioriCovariance"].store( B )
+            selfA.StoredVariables["APosterioriCovariance"].store( Pn )
         selfA._setInternalState("seed", numpy.random.get_state())
     elif selfA._parameters["nextStep"]:
         Xn = selfA._getInternalState("Xn")
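EnsembleOfBackgroundPerturbations now receives the full background covariance
Pn instead of None, so the initial members are actually spread around Xb; a
plausible sketch of the intended behaviour (an assumption, the real helper has
more options)::

    import numpy

    def EnsembleOfBackgroundPerturbations( Xb, Pn, m ):
        Xb = numpy.ravel( Xb )
        if Pn is None:
            # Degenerate ensemble: m identical copies of the background
            return numpy.tile( Xb.reshape((-1, 1)), (1, m) )
        # m members drawn from N(Xb, Pn), returned as an (n, m) array
        return numpy.random.multivariate_normal( Xb, Pn, size=m ).T
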
@@ -2967,6 +2972,9 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             XaMin               = Xa
             if selfA._toStore("APosterioriCovariance"):
                 covarianceXaMin = selfA.StoredVariables["APosterioriCovariance"][-1]
+        # ---> Pour les smoothers
+        if selfA._toStore("CurrentEnsembleState"):
+            selfA.StoredVariables["CurrentEnsembleState"].store( Xn )
     #
     # Stockage final supplémentaire de l'optimum en estimation de paramètres
     # ----------------------------------------------------------------------
@@ -2998,7 +3006,7 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
     else:
         HXb = Hm( Xb )
-    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+    HXb = HXb.reshape((-1,1))
     if Y.size != HXb.size:
         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
     if max(Y.shape) != max(HXb.shape):
@@ -3019,13 +3027,12 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     # Définition de la fonction-coût
     # ------------------------------
     def CostFunction(x):
-        _X  = numpy.asmatrix(numpy.ravel( x )).T
+        _X  = numpy.ravel( x ).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
-        _HX = Hm( _X )
-        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+        _HX = Hm( _X ).reshape((-1,1))
         _Innovation = Y - _HX
         if selfA._toStore("SimulatedObservationAtCurrentState") or \
             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -3033,8 +3040,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
         #
-        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
-        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
         J   = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
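The two terms accumulated here are the standard 3DVAR background and
observation parts of the cost function:

.. math::

    J(\mathbf{x})
    = \underbrace{\tfrac{1}{2}\,(\mathbf{x}-\mathbf{x}^b)^T\,\mathbf{B}^{-1}\,(\mathbf{x}-\mathbf{x}^b)}_{J_b}
    + \underbrace{\tfrac{1}{2}\,(\mathbf{y}-H(\mathbf{x}))^T\,\mathbf{R}^{-1}\,(\mathbf{y}-H(\mathbf{x}))}_{J_o}
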
@@ -3063,9 +3070,9 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(x):
-        _X      = numpy.asmatrix(numpy.ravel( x )).T
+        _X      = numpy.ravel( x ).reshape((-1,1))
         _HX     = Hm( _X )
-        _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
+        _HX     = numpy.ravel( _HX ).reshape((-1,1))
         GradJb  = BI * (_X - Xb)
         GradJo  = - Ha( (_X, RI * (Y - _HX)) )
         GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
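GradJb and GradJo implement the usual gradient of this cost function, with Ha
applying the adjoint of the observation operator:

.. math::

    \nabla J(\mathbf{x})
    = \mathbf{B}^{-1}(\mathbf{x}-\mathbf{x}^b)
    - \mathbf{H}^T\,\mathbf{R}^{-1}\,(\mathbf{y}-H(\mathbf{x}))
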
@@ -3149,9 +3156,8 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
         Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
     #
-    # Obtention de l'analyse
-    # ----------------------
-    Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+    Xa = Minimum
+    #--------------------------
     #
     selfA.StoredVariables["Analysis"].store( Xa )
     #
@@ -3166,8 +3172,6 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         else:
             HXa = Hm( Xa )
     #
-    # Calcul de la covariance d'analyse
-    # ---------------------------------
     if selfA._toStore("APosterioriCovariance") or \
         selfA._toStore("SimulationQuantiles") or \
         selfA._toStore("JacobianMatrixAtOptimum") or \
@@ -3184,13 +3188,11 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HessienneI = []
         nb = Xa.size
         for i in range(nb):
-            _ee    = numpy.matrix(numpy.zeros(nb)).T
+            _ee    = numpy.zeros(nb)
             _ee[i] = 1.
             _HtEE  = numpy.dot(HtM,_ee)
-            _HtEE  = numpy.asmatrix(numpy.ravel( _HtEE )).T
-            HessienneI.append( numpy.ravel( BI*_ee + HaM * (RI * _HtEE) ) )
-        HessienneI = numpy.matrix( HessienneI )
-        A = HessienneI.I
+            HessienneI.append( numpy.ravel( BI * _ee.reshape((-1,1)) + HaM * (RI * _HtEE.reshape((-1,1))) ) )
+        A = numpy.linalg.inv(numpy.array( HessienneI ))
         if min(A.shape) != max(A.shape):
             raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(selfA._name,str(A.shape)))
         if (numpy.diag(A) < 0).any():
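The loop above assembles, column by column, the Hessian of J, whose inverse is
the a posteriori analysis covariance:

.. math::

    \mathbf{A} = \left( \mathbf{B}^{-1} + \mathbf{H}^T\,\mathbf{R}^{-1}\,\mathbf{H} \right)^{-1}
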
@@ -3209,32 +3211,30 @@ def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         elif (Y.size >  Xb.size): KG = (BI + numpy.dot(HaM, RI * HtM)).I * HaM * RI
         selfA.StoredVariables["KalmanGainAtOptimum"].store( KG )
     #
-    # Calculs et/ou stockages supplémentaires
-    # ---------------------------------------
     if selfA._toStore("Innovation") or \
         selfA._toStore("SigmaObs2") or \
         selfA._toStore("MahalanobisConsistency") or \
         selfA._toStore("OMB"):
         d  = Y - HXb
     if selfA._toStore("Innovation"):
-        selfA.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        selfA.StoredVariables["Innovation"].store( d )
     if selfA._toStore("BMA"):
         selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
     if selfA._toStore("OMA"):
         selfA.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
     if selfA._toStore("OMB"):
-        selfA.StoredVariables["OMB"].store( numpy.ravel(d) )
+        selfA.StoredVariables["OMB"].store( d )
     if selfA._toStore("SigmaObs2"):
         TraceR = R.trace(Y.size)
-        selfA.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(Y)).T-numpy.asmatrix(numpy.ravel(HXa)).T)) ) / TraceR )
+        selfA.StoredVariables["SigmaObs2"].store( float( (d.T @ (numpy.ravel(Y)-numpy.ravel(HXa))) ) / TraceR )
     if selfA._toStore("MahalanobisConsistency"):
         selfA.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/d.size ) )
     if selfA._toStore("SimulationQuantiles"):
         QuantilesEstimations(selfA, A, Xa, HXa, Hm, HtM)
     if selfA._toStore("SimulatedObservationAtBackground"):
-        selfA.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
     if selfA._toStore("SimulatedObservationAtOptimum"):
-        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
     #
     return 0
 
diff --git a/test/test6901/Verification_des_Assimilation_Algorithms.py b/test/test6901/Verification_des_Assimilation_Algorithms.py
index f83cc40c50592178b326800e2b5d3d61ed945a46..a6a0bce3ecc81222dbae4b8b34960c46d6ac6d55 100644
@@ -108,7 +108,7 @@ class Test_Adao(unittest.TestCase):
         verify_similarity_of_algo_results(("3DVAR", "Blue", "ExtendedBlue", "4DVAR", "DerivativeFreeOptimization"), Xa, 5.e-5)
         verify_similarity_of_algo_results(("LinearLeastSquares", "NonLinearLeastSquares"), Xa, 5.e-7)
         verify_similarity_of_algo_results(("KalmanFilter", "ExtendedKalmanFilter", "UnscentedKalmanFilter"), Xa, 1.e-14)
-        verify_similarity_of_algo_results(("KalmanFilter", "EnsembleKalmanFilter"), Xa, 7.e-2)
+        verify_similarity_of_algo_results(("KalmanFilter", "EnsembleKalmanFilter"), Xa, 2.e-1)
         print("  Les resultats obtenus sont corrects.")
         print("")
         #
@@ -181,8 +181,8 @@ class Test_Adao(unittest.TestCase):
         msg = "Tests des ecarts attendus :"
         print(msg+"\n"+"="*len(msg))
         verify_similarity_of_algo_results(("3DVAR", "Blue", "ExtendedBlue", "4DVAR", "DerivativeFreeOptimization"), Xa, 5.e-5)
-        verify_similarity_of_algo_results(("KalmanFilter", "ExtendedKalmanFilter", "UnscentedKalmanFilter"), Xa, 1.e14)
-        verify_similarity_of_algo_results(("KalmanFilter", "EnsembleKalmanFilter"), Xa, 7.e-2)
+        verify_similarity_of_algo_results(("KalmanFilter", "ExtendedKalmanFilter", "UnscentedKalmanFilter"), Xa, 2.e-14)
+        verify_similarity_of_algo_results(("KalmanFilter", "EnsembleKalmanFilter"), Xa, 2.e-1)
         print("  Les resultats obtenus sont corrects.")
         print("")
         #