SALOME platform Git repositories - modules/adao.git/commitdiff
Minor documentation and code review corrections (3)
author    Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Tue, 16 Nov 2021 17:28:01 +0000 (18:28 +0100)
committer Jean-Philippe ARGAUD <jean-philippe.argaud@edf.fr>
Tue, 16 Nov 2021 17:28:01 +0000 (18:28 +0100)
Documentation and code review corrections, and safe removal of some
deprecated numpy.matrix usage
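
The recurring change across the hunks below replaces deprecated numpy.matrix
idioms with plain ndarray ones. A minimal hedged sketch of the three
substitutions that appear most often (illustrative arrays, not ADAO data):

    import numpy
    Hm = numpy.array([[1., 0.], [0., 1.], [1., 1.]])
    Xb = numpy.array([0., 0.])
    # matrix product "Hm * Xb" becomes the ndarray operator "Hm @ Xb"
    HXb = Hm @ Xb
    # "numpy.asmatrix(numpy.ravel( HXb )).T" becomes a plain column reshape
    HXb = HXb.reshape((-1,1))
    # the matrix-only flattening attribute ".A1" becomes "numpy.ravel(...)"
    flat = numpy.ravel(HXb)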

18 files changed:
doc/en/tutorials_in_salome.rst
doc/fr/tutorials_in_salome.rst
examples/daSalome/test005_ADAO_Operators.comm.in
examples/daSalome/test005_ADAO_Operators.py.in
examples/daSkeletons/External_data_definition_by_scripts/Script_UserPostAnalysis.py
src/daComposant/daAlgorithms/Blue.py
src/daComposant/daAlgorithms/DerivativeFreeOptimization.py
src/daComposant/daAlgorithms/DifferentialEvolution.py
src/daComposant/daAlgorithms/EnsembleBlue.py
src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
src/daComposant/daAlgorithms/ExtendedBlue.py
src/daComposant/daAlgorithms/LinearLeastSquares.py
src/daComposant/daAlgorithms/LocalSensitivityTest.py
src/daComposant/daAlgorithms/NonLinearLeastSquares.py
src/daComposant/daAlgorithms/ParticleSwarmOptimization.py
src/daComposant/daCore/BasicObjects.py
src/daComposant/daCore/NumericObjects.py
src/daComposant/daCore/PlatformInfo.py

index ab604ed31fb3eb3b8f722152e19336b3feec2a0a..073b12939167a2e394daa35a8adca0c586fd62ea 100644 (file)
@@ -186,7 +186,7 @@ generated ADAO scheme:
 
 After that point, all the modifications, executions and post-processing of the
 data assimilation scheme will be done in the YACS module. In order to check the
-result in a simple way, we use the "*UserPostAnalysis*" node (or we create here
+result in a simple way, we can use the "*UserPostAnalysis*" node (or we create
 a new YACS node by using the "*in-line script node*" sub-menu of the YACS
 graphical view).
 
@@ -226,7 +226,7 @@ window in the YACS scheme as shown below:
     **YACS menu for Container Log, and dialog window showing the log**
 
 We verify that the result is correct by checking that the log dialog window
-contains the following line:
+contains information similar to the following line:
 ::
 
     Analysis = [0.5, 0.5, 0.5]
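
The check above relies on the script placed in the "*UserPostAnalysis*" node.
A minimal hedged sketch of such a node body, assuming the ADD post-processing
interface that ADAO exposes to these scripts (see the test005 example below):

    # Fetch the last analysis and print it, producing the
    # "Analysis = [...]" line expected in the container log.
    Xa = ADD.get("Analysis")[-1]
    print("Analysis = %s"%(list(Xa),))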
index 7cf6a41f0743054ac50525677a11e6df7fbd108c..8a6f6982723185c205a3d6db271951c41587beee 100644 (file)
@@ -194,9 +194,9 @@ généré :
 
 Après ce point, toutes les modifications, exécutions et post-processing du
 schéma d'assimilation de données seront effectués dans le module YACS. De façon
-à vérifier les résultats d'une manière simple, on utilise le noeud
-"*UserPostAnalysis*" (ou on crée ici un nouveau noeud YACS par le sous-menu
-"*Noeud de script in-line*" dans la vue graphique de YACS).
+à vérifier les résultats d'une manière simple, on peut utiliser le noeud
+"*UserPostAnalysis*" (ou on crée un nouveau noeud YACS par le sous-menu "*Noeud
+de script in-line*" dans la vue graphique de YACS).
 
 Ce noeud de script va récupérer l'analyse issue de l'assimilation de données
 depuis le port de sortie "*algoResults*" du bloc de calcul (qui donne accès à
@@ -236,7 +236,7 @@ fenêtre "*proc*" du schéma YACS comme montré ci-dessous :
     **Menu YACS de la fenêtre de sortie, et boite de dialogue montrant la sortie**
 
 On vérifie que le résultat est correct en observant si la fenêtre de sortie
-contient la ligne suivante :
+contient des informations identiques à la ligne suivante :
 ::
 
     Analysis = [0.5, 0.5, 0.5]
index 45ceacb817d4e2c329e652ba47f2731d661d01ae..d618eb9a21dc3e6d23fdd3b711641dd0e4c3d9b2 100644 (file)
@@ -23,14 +23,14 @@ ASSIMILATION_STUDY(StudyName='Test',
                                           data=_F(FROM='ScriptWithSwitch',
                                                   SCRIPTWITHSWITCH_FILE='test005_ADAO_scripts_for_JDC.py',),),
                    UserPostAnalysis=_F(FROM='String',
-                                       STRING=
+                                       STRING=\
 """import numpy
 Xb = ADD.get("Background")
 Xa = ADD.get("Analysis")[-1]
 print()
-print("Size of Background...........= %i"%len(Xb.A1))
+print("Size of Background...........= %i"%len(Xb))
 print("Size of Analysis.............= %i"%len(Xa))
 print("Min, mean, max of Analysis...= %8.3f, %8.3f, %8.3f"%(min(Xa),numpy.mean(Xa),max(Xa)))
 print()
 """,),);
-#VERSION_CATALOGUE:V8_3_0:FIN VERSION_CATALOGUE
+#VERSION_CATALOGUE:V9_8_0:FIN VERSION_CATALOGUE
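
The change from len(Xb.A1) to len(Xb) above follows from the storage
migration: .A1, the flattened 1-D view, exists only on numpy.matrix, while
the migrated ndarray supports len() directly. A hedged illustration:

    import numpy
    Xb_old = numpy.matrix([[0.], [0.], [0.]])    # deprecated matrix storage
    print(len(Xb_old.A1))                        # 3, via the matrix-only .A1 view
    Xb_new = numpy.ravel(numpy.asarray(Xb_old))  # ndarray storage after migration
    print(len(Xb_new))                           # 3, len() works directly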
index 641a7558cd6e32d395bb270a1a85f79fe9bae799..343868ca39c0f32d2e6f97f4a0f12325f4c3cb22 100644 (file)
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+#-*- coding: utf-8 -*-
 study_config = {}
 study_config['StudyType'] = 'ASSIMILATION_STUDY'
 study_config['Name'] = 'Test'
@@ -48,13 +48,14 @@ outputvariables_config['Order'] = ['adao_default']
 outputvariables_config['adao_default'] = -1
 study_config['OutputVariables'] = outputvariables_config
 study_config['Repertory'] = '@prefix@/share/adao_examples/daSalome'
+study_config['ExecuteInContainer'] = 'No'
 Analysis_config = {}
 Analysis_config['From'] = 'String'
 Analysis_config['Data'] = """import numpy
 Xb = ADD.get("Background")
 Xa = ADD.get("Analysis")[-1]
 print()
-print("Size of Background...........= %i"%len(Xb.A1))
+print("Size of Background...........= %i"%len(Xb))
 print("Size of Analysis.............= %i"%len(Xa))
 print("Min, mean, max of Analysis...= %8.3f, %8.3f, %8.3f"%(min(Xa),numpy.mean(Xa),max(Xa)))
 print()
index 72c0597c7210505ebe5300950ed34c89acaf7e32..7d61fdc77211ebf837518a2838e79515803e2c4a 100644 (file)
@@ -41,15 +41,15 @@ J           = ADD.get("CostFunctionJ")[:]
 #
 # Verifying the results by printing
 # ---------------------------------
-print("")
-print("obs = [%s]"%(", ".join(["%.4f"%v for v in ADD.get("Observation").A1])))
-print("")
-print("xb  = [%s]"%(", ".join(["%.4f"%v for v in ADD.get("Background").A1])))
+print()
+print("obs = [%s]"%(", ".join(["%.4f"%v for v in numpy.ravel(ADD.get("Observation"))])))
+print()
+print("xb  = [%s]"%(", ".join(["%.4f"%v for v in numpy.ravel(ADD.get("Background"))])))
 print("xt  = [%s]"%(", ".join(["%.4f"%v for v in numpy.array(xt)])))
 print("xa  = [%s]"%(", ".join(["%.4f"%v for v in numpy.array(xa)])))
-print("")
+print()
 for i in range( len(x_series) ):
-    print("Step %2i : J = %.4e     X = [%s]"%(i, J[i], ", ".join(["%.4f"%v for v in x_series[i]])))
-print("")
+    print("Step %2i : J = %.4e  et  X = [%s]"%(i, J[i], ", ".join(["%.4f"%v for v in x_series[i]])))
+print()
 #
 # ==============================================================================
index e14ef3ffb4863a439dfdc31041e8975e6ed826c4..8864e9e85c9c0313086c3921d4b686d3e1d1f4ec 100644 (file)
@@ -116,38 +116,32 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Ha = HO["Adjoint"].asMatrix(Xb)
         Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
         #
-        # Utilisation éventuelle d'un vecteur H(Xb) précalculé
-        # ----------------------------------------------------
         if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
             HXb = HO["AppliedInX"]["HXb"]
         else:
-            HXb = Hm * Xb
-        HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+            HXb = Hm @ Xb
+        HXb = HXb.reshape((-1,1))
         if Y.size != HXb.size:
             raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
         if max(Y.shape) != max(HXb.shape):
             raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
         #
-        # Précalcul des inversions de B et R
-        # ----------------------------------
         BI = B.getI()
         RI = R.getI()
         #
-        # Calcul de l'innovation
-        # ----------------------
-        d  = Y - HXb
+        Innovation  = Y - HXb
         #
         # Calcul de la matrice de gain et de l'analyse
         # --------------------------------------------
         if Y.size <= Xb.size:
             _A = R + numpy.dot(Hm, B * Ha)
-            _u = numpy.linalg.solve( _A , d )
+            _u = numpy.linalg.solve( _A , Innovation )
             Xa = Xb + B * Ha * _u
         else:
             _A = BI + numpy.dot(Ha, RI * Hm)
-            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * d) )
+            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
             Xa = Xb + _u
-        self.StoredVariables["Analysis"].store( Xa.A1 )
+        self.StoredVariables["Analysis"].store( Xa )
         #
         # Calcul de la fonction coût
         # --------------------------
@@ -162,15 +156,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             self._toStore("SimulatedObservationAtCurrentState") or \
             self._toStore("SimulatedObservationAtOptimum") or \
             self._toStore("SimulationQuantiles"):
-            HXa = Hm * Xa
+            HXa = Hm @ Xa
             oma = Y - HXa
         if self._parameters["StoreInternalVariables"] or \
             self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
             self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
             self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
             self._toStore("MahalanobisConsistency"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * oma.T * RI * oma )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * oma.T * (RI * oma) )
             J   = Jb + Jo
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
@@ -200,35 +194,35 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
         if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(Xa) )
+            self.StoredVariables["CurrentState"].store( Xa )
         if self._toStore("CurrentOptimum"):
-            self.StoredVariables["CurrentOptimum"].store( numpy.ravel(Xa) )
+            self.StoredVariables["CurrentOptimum"].store( Xa )
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+            self.StoredVariables["Innovation"].store( Innovation )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( numpy.ravel(oma) )
+            self.StoredVariables["OMA"].store( oma )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+            self.StoredVariables["OMB"].store( Innovation )
         if self._toStore("SigmaObs2"):
             TraceR = R.trace(Y.size)
-            self.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(oma)).T)) ) / TraceR )
+            self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
         if self._toStore("SigmaBck2"):
-            self.StoredVariables["SigmaBck2"].store( float( (d.T * Hm * (Xa - Xb))/(Hm * B * Hm.T).trace() ) )
+            self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
         if self._toStore("MahalanobisConsistency"):
-            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/d.size ) )
+            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
         if self._toStore("SimulationQuantiles"):
             H  = HO["Direct"].appliedTo
             NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, Hm)
         if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
         if self._toStore("SimulatedObservationAtCurrentOptimum"):
-            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
         #
         self._post_run(HO)
         return 0
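
In the small-observation branch above, the gain is applied through
numpy.linalg.solve instead of an explicit inverse. A runnable hedged sketch
of that branch with toy operators (hypothetical Hm, B, R and data, plain
arrays rather than ADAO covariance objects):

    import numpy
    Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]])  # linear observation operator
    B  = numpy.eye(3)                               # background error covariance
    R  = numpy.eye(2)                               # observation error covariance
    Xb = numpy.zeros((3,1))
    Y  = numpy.array([[0.5], [0.5]])
    Innovation = Y - Hm @ Xb
    _u = numpy.linalg.solve( R + Hm @ B @ Hm.T, Innovation )  # solve, no inverse
    Xa = Xb + B @ Hm.T @ _u                         # BLUE analysis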
index 78e2a3068b523be0d209d628dc0ec5e656ce6547..8840436c146c10368ebd30af0a79f0b1c0c2bbc6 100644 (file)
@@ -73,11 +73,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default  = "AugmentedWeightedLeastSquares",
             typecast = str,
             message  = "Critère de qualité utilisé",
-            listval  = ["AugmentedWeightedLeastSquares","AWLS","DA",
-                        "WeightedLeastSquares","WLS",
-                        "LeastSquares","LS","L2",
-                        "AbsoluteValue","L1",
-                        "MaximumError","ME"],
+            listval  = [
+                "AugmentedWeightedLeastSquares","AWLS","DA",
+                "WeightedLeastSquares","WLS",
+                "LeastSquares","LS","L2",
+                "AbsoluteValue","L1",
+                "MaximumError","ME",
+                ],
             )
         self.defineRequiredParameter(
             name     = "StoreInternalVariables",
@@ -159,8 +161,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             if QualityMeasure in ["AugmentedWeightedLeastSquares","AWLS","DA"]:
                 if BI is None or RI is None:
                     raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
-                Jb  = 0.5 * (_X - Xb).T * BI * (_X - Xb)
-                Jo  = 0.5 * (_Innovation).T * RI * (_Innovation)
+                Jb  = 0.5 * (_X - Xb).T * (BI * (_X - Xb))
+                Jo  = 0.5 * _Innovation.T * (RI * _Innovation)
             elif QualityMeasure in ["WeightedLeastSquares","WLS"]:
                 if RI is None:
                     raise ValueError("Observation error covariance matrix has to be properly defined!")
@@ -388,13 +390,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Obtention de l'analyse
         # ----------------------
-        Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+        Xa = numpy.ravel( Minimum )
         #
-        self.StoredVariables["Analysis"].store( Xa.A1 )
+        self.StoredVariables["Analysis"].store( Xa )
         #
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
-        if self._toStore("OMA" ) or \
+        if self._toStore("OMA") or \
             self._toStore("SimulatedObservationAtOptimum"):
             if self._toStore("SimulatedObservationAtCurrentState"):
                 HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
@@ -403,20 +405,22 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             else:
                 HXa = Hm(Xa)
         if self._toStore("Innovation") or \
-            self._toStore("OMB"):
-            d  = Y - HXb
+            self._toStore("OMB") or \
+            self._toStore("SimulatedObservationAtBackground"):
+            HXb = Hm(Xb)
+            Innovation  = Y - HXb
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+            self.StoredVariables["Innovation"].store( Innovation )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+            self.StoredVariables["OMB"].store( Innovation )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
             self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
         if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(Hm(Xb)) )
+            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
         #
         self._post_run()
         return 0
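
The reparenthesizing of Jb and Jo above, repeated in several algorithms of
this commit, makes the covariance inverse act on a column vector first, which
is what the overloaded covariance product expects and keeps every step a
matrix-vector operation. A hedged plain-array illustration:

    import numpy
    BI = numpy.eye(3)                                  # stands in for B.getI()
    dX = numpy.array([0.1, -0.2, 0.3]).reshape((-1,1))
    Jb = float( 0.5 * dX.T @ (BI @ dX) )               # BI applied to a column vector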
index 8e7710fa4deba837c57f31262520933ded820322..02a5327a641e58c0c05a4042867f39d1efe01075 100644 (file)
@@ -187,8 +187,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             if QualityMeasure in ["AugmentedWeightedLeastSquares","AWLS","DA"]:
                 if BI is None or RI is None:
                     raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
-                Jb  = 0.5 * (_X - Xb).T * BI * (_X - Xb)
-                Jo  = 0.5 * (_Innovation).T * RI * (_Innovation)
+                Jb  = 0.5 * (_X - Xb).T * (BI * (_X - Xb))
+                Jo  = 0.5 * _Innovation.T * (RI * _Innovation)
             elif QualityMeasure in ["WeightedLeastSquares","WLS"]:
                 if RI is None:
                     raise ValueError("Observation error covariance matrix has to be properly defined!")
@@ -262,7 +262,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
-        if self._toStore("OMA") or self._toStore("SimulatedObservationAtOptimum"):
+        if self._toStore("OMA") or \
+            self._toStore("SimulatedObservationAtOptimum"):
             if self._toStore("SimulatedObservationAtCurrentState"):
                 HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
             elif self._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -270,20 +271,22 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             else:
                 HXa = Hm(Xa)
         if self._toStore("Innovation") or \
-           self._toStore("OMB"):
-            d  = Y - HXb
+            self._toStore("OMB") or \
+            self._toStore("SimulatedObservationAtBackground"):
+            HXb = Hm(Xb)
+            Innovation  = Y - HXb
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+            self.StoredVariables["Innovation"].store( Innovation )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+            self.StoredVariables["OMB"].store( Innovation )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
             self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
         if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(Hm(Xb)) )
+            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
         #
         self._post_run()
         return 0
index 3360396228bff5497386b5d3c3f7ef83285ca73c..4416a3ac23bb201e36eaa553324c5cf69c48dba7 100644 (file)
@@ -41,6 +41,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             message  = "Liste de calculs supplémentaires à stocker et/ou effectuer",
             listval  = [
                 "Analysis",
+                "CurrentOptimum",
                 "CurrentState",
                 "Innovation",
                 "SimulatedObservationAtBackground",
@@ -86,39 +87,42 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Initialisation des opérateurs d'observation et de la matrice gain
         # -----------------------------------------------------------------
-        Hm = HO["Tangent"].asMatrix(None)
-        Hm = Hm.reshape(Y.size,Xb[0].size) # ADAO & check shape
-        Ha = HO["Adjoint"].asMatrix(None)
-        Ha = Ha.reshape(Xb[0].size,Y.size) # ADAO & check shape
+        Xbm = Xb.mean()
+        Hm = HO["Tangent"].asMatrix(Xbm)
+        Hm = Hm.reshape(Y.size,Xbm.size) # ADAO & check shape
+        Ha = HO["Adjoint"].asMatrix(Xbm)
+        Ha = Ha.reshape(Xbm.size,Y.size) # ADAO & check shape
         #
         # Calcul de la matrice de gain dans l'espace le plus petit et de l'analyse
         # ------------------------------------------------------------------------
         if Y.size <= Xb[0].size:
-            K  = B * Ha * (R + Hm * B * Ha).I
+            K  = B * Ha * (R + Hm * (B * Ha)).I
         else:
-            K = (BI + Ha * RI * Hm).I * Ha * RI
+            K = (BI + Ha * (RI * Hm)).I * Ha * RI
         #
         # Calcul du BLUE pour chaque membre de l'ensemble
         # -----------------------------------------------
         for iens in range(nb_ens):
-            HXb = numpy.ravel(numpy.dot(Hm, Xb[iens]))
+            HXb = Hm @ Xb[iens]
             if self._toStore("SimulatedObservationAtBackground"):
                 self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
-            d  = numpy.ravel(EnsembleY[:,iens]) - HXb
+            Innovation  = numpy.ravel(EnsembleY[:,iens]) - numpy.ravel(HXb)
             if self._toStore("Innovation"):
-                self.StoredVariables["Innovation"].store( d )
-            Xa = numpy.ravel(Xb[iens]) + numpy.dot(K, d)
+                self.StoredVariables["Innovation"].store( Innovation )
+            Xa = Xb[iens] + K @ Innovation
             self.StoredVariables["CurrentState"].store( Xa )
             if self._toStore("SimulatedObservationAtCurrentState"):
-                self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.dot(Hm, Xa) )
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm @ numpy.ravel(Xa) )
         #
         # Fabrication de l'analyse
         # ------------------------
         Members = self.StoredVariables["CurrentState"][-nb_ens:]
         Xa = numpy.array( Members ).mean(axis=0)
         self.StoredVariables["Analysis"].store( Xa )
+        if self._toStore("CurrentOptimum"):
+            self.StoredVariables["CurrentOptimum"].store( Xa )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.dot(Hm, Xa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( Hm @ numpy.ravel(Xa) )
         #
         self._post_run(HO)
         return 0
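
The loop above applies a single gain to every ensemble member, and the stored
analysis is the mean of the updated members. A toy hedged sketch of that flow
(hypothetical two-member ensemble):

    import numpy
    K  = 0.5 * numpy.eye(2)                          # gain, computed once
    Hm = numpy.eye(2)                                # observation operator
    Xb_members = [numpy.zeros(2), numpy.ones(2)]
    Y_members  = [numpy.array([0.5, 0.5])] * 2
    Members = [xb + K @ (y - Hm @ xb) for xb, y in zip(Xb_members, Y_members)]
    Xa = numpy.array( Members ).mean(axis=0)         # stored as "Analysis"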
index 559ad15851b33db46f351c383be42dcc4d3df7c6..e5161a1999361b6969d5c463daeae0213a99dddf 100644 (file)
@@ -40,6 +40,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "MLEF",
                 "IEnKF",
                 "EnKS",
+                "E3DVAR",
                 ],
             listadv  = [
                 "StochasticEnKF",
@@ -56,6 +57,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "IEnKF-B",
                 "EnKS-KFF",
                 "IEKF",
+                "E3DVAR-EnKF",
+                "E3DVAR-ETKF",
+                "E3DVAR-MLEF",
                 ],
             )
         self.defineRequiredParameter(
@@ -104,6 +108,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             message  = "Nombre d'intervalles de temps de lissage dans le passé",
             minval   = 0,
             )
+        self.defineRequiredParameter(
+            name     = "HybridCovarianceEquilibrium",
+            default  = 0.5,
+            typecast = float,
+            message  = "Facteur d'équilibre entre la covariance statique et la covariance d'ensemble",
+            minval   = 0.,
+            maxval   = 1.,
+            )
         self.defineRequiredParameter(
             name     = "SetSeed",
             typecast = numpy.random.seed,
@@ -213,6 +225,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             NumericObjects.enks(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula")
         #
         #--------------------------
+        # Default E3DVAR = E3DVAR-EnKF
+        elif self._parameters["Variant"] in ["E3DVAR-EnKF", "E3DVAR"]:
+            NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+        #
+        elif self._parameters["Variant"] == "E3DVAR-ETKF":
+            NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+        #
+        elif self._parameters["Variant"] == "E3DVAR-MLEF":
+            NumericObjects.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+        #
+        #--------------------------
         else:
             raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
         #
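
The new "HybridCovarianceEquilibrium" parameter weights the static background
covariance against the ensemble one in the E3DVAR variants. A hedged sketch
of the convex blend such a factor presumably controls (an assumption based on
the parameter message above, not on the NumericObjects implementation):

    import numpy
    alpha = 0.5                               # HybridCovarianceEquilibrium in [0,1]
    B_static   = numpy.eye(3)                 # static background covariance
    Ensemble   = numpy.random.randn(3, 20)    # 3 state variables, 20 members
    B_ensemble = numpy.cov(Ensemble)          # ensemble-estimated covariance
    B_hybrid   = alpha * B_static + (1. - alpha) * B_ensemble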
index 5b1deee66f60ae0b436244e4c70ff1fc8ae2aa05..9f6cfd22fd3f1ce77bdbab38b8d4dbb4c7ae3c28 100644 (file)
@@ -117,38 +117,32 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
         H  = HO["Direct"].appliedTo
         #
-        # Utilisation éventuelle d'un vecteur H(Xb) précalculé
-        # ----------------------------------------------------
         if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
             HXb = H( Xb, HO["AppliedInX"]["HXb"])
         else:
             HXb = H( Xb )
-        HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+        HXb = HXb.reshape((-1,1))
         if Y.size != HXb.size:
             raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
         if max(Y.shape) != max(HXb.shape):
             raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
         #
-        # Précalcul des inversions de B et R
-        # ----------------------------------
         BI = B.getI()
         RI = R.getI()
         #
-        # Calcul de l'innovation
-        # ----------------------
-        d  = Y - HXb
+        Innovation  = Y - HXb
         #
         # Calcul de la matrice de gain et de l'analyse
         # --------------------------------------------
         if Y.size <= Xb.size:
             _A = R + numpy.dot(Hm, B * Ha)
-            _u = numpy.linalg.solve( _A , d )
+            _u = numpy.linalg.solve( _A , Innovation )
             Xa = Xb + B * Ha * _u
         else:
             _A = BI + numpy.dot(Ha, RI * Hm)
-            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * d) )
+            _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
             Xa = Xb + _u
-        self.StoredVariables["Analysis"].store( Xa.A1 )
+        self.StoredVariables["Analysis"].store( Xa )
         #
         # Calcul de la fonction coût
         # --------------------------
@@ -163,15 +157,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             self._toStore("SimulatedObservationAtCurrentState") or \
             self._toStore("SimulatedObservationAtOptimum") or \
             self._toStore("SimulationQuantiles"):
-            HXa  = numpy.matrix(numpy.ravel( H( Xa ) )).T
+            HXa  = H( Xa ).reshape((-1,1))
             oma = Y - HXa
         if self._parameters["StoreInternalVariables"] or \
             self._toStore("CostFunctionJ")  or self._toStore("CostFunctionJAtCurrentOptimum") or \
             self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
             self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
             self._toStore("MahalanobisConsistency"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * oma.T * RI * oma )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * oma.T * (RI * oma) )
             J   = Jb + Jo
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
@@ -201,36 +195,36 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
         if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
-            self.StoredVariables["CurrentState"].store( numpy.ravel(Xa) )
+            self.StoredVariables["CurrentState"].store( Xa )
         if self._toStore("CurrentOptimum"):
-            self.StoredVariables["CurrentOptimum"].store( numpy.ravel(Xa) )
+            self.StoredVariables["CurrentOptimum"].store( Xa )
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+            self.StoredVariables["Innovation"].store( Innovation )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
-            self.StoredVariables["OMA"].store( numpy.ravel(oma) )
+            self.StoredVariables["OMA"].store( oma )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+            self.StoredVariables["OMB"].store( Innovation )
         if self._toStore("SigmaObs2"):
             TraceR = R.trace(Y.size)
-            self.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(oma)).T)) ) / TraceR )
+            self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
         if self._toStore("SigmaBck2"):
-            self.StoredVariables["SigmaBck2"].store( float( (d.T * Hm * (Xa - Xb))/(Hm * B * Hm.T).trace() ) )
+            self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
         if self._toStore("MahalanobisConsistency"):
-            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/d.size ) )
+            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
         if self._toStore("SimulationQuantiles"):
             HtM  = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
             HtM  = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
             NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, HtM)
         if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtCurrentState"):
-            self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
         if self._toStore("SimulatedObservationAtCurrentOptimum"):
-            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
         #
         self._post_run(HO)
         return 0
index 2da26a48547626736318d05616fcaa29ed8686a0..203f67d1de1afe7b1e40aeaa68dbbfaadd673da0 100644 (file)
@@ -80,7 +80,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         #
         # Calcul de la matrice de gain et de l'analyse
         # --------------------------------------------
-        K = (Ha * RI * Hm).I * Ha * RI
+        K = (Ha * (RI * Hm)).I * Ha * RI
         Xa =  K * Y
         self.StoredVariables["Analysis"].store( Xa )
         #
@@ -101,7 +101,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
             self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum"):
             Jb  = 0.
-            Jo  = float( 0.5 * oma.T * RI * oma )
+            Jo  = float( 0.5 * oma.T * (RI * oma) )
             J   = Jb + Jo
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
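
The gain regrouped above is the weighted least-squares (normal equations)
solution. A hedged toy equivalent with plain arrays (hypothetical Hm, RI and
data):

    import numpy
    Hm = numpy.array([[1., 0.], [0., 1.], [1., 1.]])
    RI = numpy.eye(3)                                   # stands in for R.getI()
    Y  = numpy.array([[0.5], [0.5], [1.0]])
    K  = numpy.linalg.inv(Hm.T @ (RI @ Hm)) @ Hm.T @ RI # weighted normal equations
    Xa = K @ Y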
index 7b71fd2e569fa14d004dff52b00718a15cafa29c..d16837b74211b244bfc3fc7e366faaa4e4f2566c 100644 (file)
@@ -79,7 +79,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
                 HXb = HO["AppliedInX"]["HXb"]
             else:
-                HXb = Ht * Xb
+                HXb = Ht @ Xb
             HXb = numpy.asmatrix(numpy.ravel( HXb )).T
             if Y.size != HXb.size:
                 raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
index 61a2613be5aa62340983bbf33b1ed8a7a455cade..d7031d436b719f19d0c58e65c5dbe50ee506b871 100644 (file)
@@ -33,7 +33,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default  = "LBFGSB",
             typecast = str,
             message  = "Minimiseur utilisé",
-            listval  = ["LBFGSB","TNC", "CG", "NCG", "BFGS", "LM"],
+            listval  = [
+                "LBFGSB",
+                "TNC",
+                "CG",
+                "NCG",
+                "BFGS",
+                "LM",
+                ],
             )
         self.defineRequiredParameter(
             name     = "MaximumNumberOfSteps",
@@ -99,7 +106,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             )
         self.defineRequiredParameter( # Pas de type
             name     = "Bounds",
-            message  = "Liste des valeurs de bornes",
+            message  = "Liste des paires de bornes",
             )
         self.defineRequiredParameter(
             name     = "InitializationPoint",
@@ -118,18 +125,16 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
         self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
         #
-        # Opérateurs
-        # ----------
+        # Initialisations
+        # ---------------
         Hm = HO["Direct"].appliedTo
         Ha = HO["Adjoint"].appliedInXTo
         #
-        # Utilisation éventuelle d'un vecteur H(Xb) précalculé
-        # ----------------------------------------------------
         if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
             HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
         else:
             HXb = Hm( Xb )
-        HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+        HXb = HXb.reshape((-1,1))
         if Y.size != HXb.size:
             raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
         if max(Y.shape) != max(HXb.shape):
@@ -139,16 +144,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if self._parameters["Minimizer"] == "LM":
             RdemiI = R.choleskyI()
         #
+        Xini = self._parameters["InitializationPoint"]
+        #
         # Définition de la fonction-coût
         # ------------------------------
         def CostFunction(x):
-            _X  = numpy.asmatrix(numpy.ravel( x )).T
+            _X  = numpy.ravel( x ).reshape((-1,1))
             if self._parameters["StoreInternalVariables"] or \
                 self._toStore("CurrentState") or \
                 self._toStore("CurrentOptimum"):
                 self.StoredVariables["CurrentState"].store( _X )
-            _HX = Hm( _X )
-            _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+            _HX = Hm( _X ).reshape((-1,1))
             _Innovation = Y - _HX
             if self._toStore("SimulatedObservationAtCurrentState") or \
                 self._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -157,7 +163,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
             #
             Jb  = 0.
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             #
             self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["CostFunctionJ"]) )
@@ -186,21 +192,19 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             return J
         #
         def GradientOfCostFunction(x):
-            _X      = numpy.asmatrix(numpy.ravel( x )).T
-            _HX     = Hm( _X )
-            _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
+            _X      = x.reshape((-1,1))
+            _HX     = Hm( _X ).reshape((-1,1))
             GradJb  = 0.
             GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-            GradJ   = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
-            return GradJ.A1
+            GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+            return GradJ
         #
         def CostFunctionLM(x):
-            _X  = numpy.asmatrix(numpy.ravel( x )).T
-            _HX = Hm( _X )
-            _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+            _X  = numpy.ravel( x ).reshape((-1,1))
+            _HX = Hm( _X ).reshape((-1,1))
             _Innovation = Y - _HX
             Jb  = 0.
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             if self._parameters["StoreInternalVariables"] or \
                 self._toStore("CurrentState"):
@@ -212,24 +216,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             return numpy.ravel( RdemiI*_Innovation )
         #
         def GradientOfCostFunctionLM(x):
-            _X      = numpy.asmatrix(numpy.ravel( x )).T
-            _HX     = Hm( _X )
-            _HX     = numpy.asmatrix(numpy.ravel( _HX )).T
-            GradJb  = 0.
-            GradJo  = - Ha( (_X, RI * (Y - _HX)) )
-            GradJ   = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
+            _X      = x.reshape((-1,1))
             return - RdemiI*HO["Tangent"].asMatrix( _X )
         #
-        # Point de démarrage de l'optimisation : Xini = Xb
-        # ------------------------------------
-        Xini = self._parameters["InitializationPoint"]
-        #
         # Minimisation de la fonctionnelle
         # --------------------------------
         nbPreviousSteps = self.StoredVariables["CostFunctionJ"].stepnumber()
         #
         if self._parameters["Minimizer"] == "LBFGSB":
-            # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
             if "0.19" <= scipy.version.version <= "1.1.0":
                 import lbfgsbhlt as optimiseur
             else:
@@ -315,9 +309,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
             Minimum = self.StoredVariables["CurrentState"][IndexMin]
         #
-        # Obtention de l'analyse
-        # ----------------------
-        Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+        Xa = Minimum
+        #--------------------------
         #
         self.StoredVariables["Analysis"].store( Xa )
         #
@@ -330,20 +323,19 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             else:
                 HXa = Hm( Xa )
         #
-        #
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
         if self._toStore("Innovation") or \
             self._toStore("OMB"):
-            d  = Y - HXb
+            Innovation  = Y - HXb
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( d )
+            self.StoredVariables["Innovation"].store( Innovation )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
             self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( d )
+            self.StoredVariables["OMB"].store( Innovation )
         if self._toStore("SimulatedObservationAtBackground"):
             self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtOptimum"):
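
The cost and gradient closures above follow the usual pattern for handing a
least-squares functional to a scipy minimizer. A self-contained hedged sketch
with toy operators (hypothetical Hm, RI and data):

    import numpy, scipy.optimize
    Hm = numpy.array([[1., 0.], [0., 1.], [1., 1.]])
    RI = numpy.eye(3)                         # stands in for R.getI()
    Y  = numpy.array([0.5, 0.5, 1.0]).reshape((-1,1))
    def CostFunction(x):
        _Innovation = Y - Hm @ x.reshape((-1,1))
        return float( 0.5 * _Innovation.T @ (RI @ _Innovation) )
    def GradientOfCostFunction(x):
        _Innovation = Y - Hm @ x.reshape((-1,1))
        return numpy.ravel( - Hm.T @ (RI @ _Innovation) )
    Result = scipy.optimize.minimize(CostFunction, numpy.zeros(2),
                                     jac=GradientOfCostFunction, method="L-BFGS-B")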
index d9955045dcee1c35748d2a529670d7d1ec0117e8..34c529df62c4b0531949acecfdcba26183325258 100644 (file)
@@ -143,12 +143,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         Phip = 1. - Phig
         logging.debug("%s Taux de rappel au meilleur insecte du groupe (entre 0 et 1) = %s et à la meilleure position précédente (son complémentaire à 1) = %s"%(self._name, str(Phig), str(Phip)))
         #
-        # Opérateur d'observation
-        # -----------------------
         Hm = HO["Direct"].appliedTo
         #
-        # Précalcul des inversions de B et R
-        # ----------------------------------
         BI = B.getI()
         RI = R.getI()
         #
@@ -183,17 +179,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             return J
         #
-        # Point de démarrage de l'optimisation : Xini = Xb
-        # ------------------------------------
-        if isinstance(Xb, type(numpy.matrix([]))):
-            Xini = Xb.A1.tolist()
-        elif Xb is not None:
-            Xini = list(Xb)
+        if Xb is not None:
+            Xini = numpy.ravel(Xb)
         else:
             Xini = numpy.zeros(len(BoxBounds[:,0]))
         #
-        # Initialisation des bornes
-        # -------------------------
         SpaceUp  = BoxBounds[:,1] + Xini
         SpaceLow = BoxBounds[:,0] + Xini
         nbparam  = len(SpaceUp)
@@ -286,17 +276,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         # Calculs et/ou stockages supplémentaires
         # ---------------------------------------
         if self._toStore("Innovation"):
-            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+            self.StoredVariables["Innovation"].store( d )
         if self._toStore("BMA"):
             self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
         if self._toStore("OMA"):
             self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
         if self._toStore("OMB"):
-            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+            self.StoredVariables["OMB"].store( d )
         if self._toStore("SimulatedObservationAtBackground"):
-            self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+            self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
         if self._toStore("SimulatedObservationAtOptimum"):
-            self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+            self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
         #
         self._post_run(HO)
         return 0
index 1293b9643a8fe1fc3d6c5aa9c3134c5aafd73c23..e8b5755065a5855011015f23037290f795bdf3e5 100644 (file)
@@ -137,7 +137,7 @@ class Operator(object):
         Arguments :
         - name : nom d'opérateur
         - fromMethod : argument de type fonction Python
-        - fromMatrix : argument adapté au constructeur numpy.matrix
+        - fromMatrix : argument adapté au constructeur numpy.array/matrix
         - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
         - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
           gourmands en mémoire
@@ -163,7 +163,9 @@ class Operator(object):
             self.__Type   = "Method"
         elif fromMatrix is not None:
             self.__Method = None
-            self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
+            if isinstance(fromMatrix, str):
+               fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
+            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
             self.__Type   = "Matrix"
         else:
             self.__Method = None
@@ -211,7 +213,7 @@ class Operator(object):
             assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
             _HxValue = []
             for i in range(len(_HValue)):
-                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
+                _HxValue.append( _HValue[i] )
                 if self.__avoidRC:
                     Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
         else:
@@ -230,8 +232,7 @@ class Operator(object):
                 else:
                     if self.__Matrix is not None:
                         self.__addOneMatrixCall()
-                        _xv = numpy.ravel(xv).reshape((-1,1))
-                        _hv = self.__Matrix * _xv
+                        _hv = self.__Matrix @ numpy.ravel(xv)
                     else:
                         self.__addOneMethodCall()
                         _xserie.append( xv )
@@ -279,9 +280,8 @@ class Operator(object):
             _HxValue = []
             for paire in _xuValue:
                 _xValue, _uValue = paire
-                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                 self.__addOneMatrixCall()
-                _HxValue.append( self.__Matrix * _xValue )
+                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
         else:
             _xuArgs = []
             for paire in _xuValue:
@@ -326,9 +326,8 @@ class Operator(object):
             _HxValue = []
             for paire in _nxValue:
                 _xNominal, _xValue = paire
-                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                 self.__addOneMatrixCall()
-                _HxValue.append( self.__Matrix * _xValue )
+                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
         else:
             self.__addOneMethodCall( len(_nxValue) )
             if self.__extraArgs is None:
@@ -354,7 +353,7 @@ class Operator(object):
             if argsAsSerie:
                 self.__addOneMethodCall( len(ValueForMethodForm) )
                 for _vfmf in ValueForMethodForm:
-                    mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
+                    mValue.append( self.__Method(((_vfmf, None),)) )
             else:
                 self.__addOneMethodCall()
                 mValue = self.__Method(((ValueForMethodForm, None),))
@@ -557,7 +556,9 @@ class FullOperator(object):
             self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
             self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
         elif asMatrix is not None:
-            __matrice = numpy.matrix( __Matrix, numpy.float )
+            if isinstance(__Matrix, str):
+                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
+            __matrice = numpy.asarray( __Matrix, dtype=float )
             self.__FO["Direct"]  = Operator( name = self.__name,           fromMatrix = __matrice,   reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
             self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice,   reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
             self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
@@ -1076,6 +1077,45 @@ class Algorithm(object):
         else:
             return __SC
 
+# ==============================================================================
+class PartialAlgorithm(object):
+    """
+    Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
+    action avancée comme la vérification. Pour les méthodes reprises ici,
+    le fonctionnement est identique à celui de la classe "Algorithm".
+    """
+    def __init__(self, name):
+        self._name = str( name )
+        self._parameters = {"StoreSupplementaryCalculations":[]}
+        #
+        self.StoredVariables = {}
+        self.StoredVariables["Analysis"]                             = Persistence.OneVector(name = "Analysis")
+        self.StoredVariables["CostFunctionJ"]                        = Persistence.OneScalar(name = "CostFunctionJ")
+        self.StoredVariables["CostFunctionJb"]                       = Persistence.OneScalar(name = "CostFunctionJb")
+        self.StoredVariables["CostFunctionJo"]                       = Persistence.OneScalar(name = "CostFunctionJo")
+        self.StoredVariables["CurrentIterationNumber"]               = Persistence.OneIndex(name  = "CurrentIterationNumber")
+        #
+        self.__canonical_stored_name = {}
+        for k in self.StoredVariables:
+            self.__canonical_stored_name[k.lower()] = k
+
+    def _toStore(self, key):
+        "True if in StoreSupplementaryCalculations, else False"
+        return key in self._parameters["StoreSupplementaryCalculations"]
+
+    def get(self, key=None):
+        """
+        Renvoie l'une des variables stockées identifiée par la clé, ou le
+        dictionnaire de l'ensemble des variables disponibles en l'absence de
+        clé. Ce sont directement les variables sous forme objet qui sont
+        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
+        des classes de persistance.
+        """
+        if key is not None:
+            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
+        else:
+            return self.StoredVariables
+
 # ==============================================================================
 class AlgorithmAndParameters(object):
     """
@@ -1431,9 +1471,9 @@ class AlgorithmAndParameters(object):
         if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
             if self.__algorithmName in ["EnsembleBlue",]:
                 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
-                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
+                self.__Xb = Persistence.OneVector("Background")
                 for member in asPersistentVector:
-                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
+                    self.__Xb.store( numpy.asarray(member, dtype=float) )
                 __Xb_shape = min(__B_shape)
             else:
                 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
@@ -1721,16 +1761,22 @@ class State(object):
         #
         if __Vector is not None:
             self.__is_vector = True
-            self.__V         = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
+            if isinstance(__Vector, str):
+               __Vector = PlatformInfo.strvect2liststr( __Vector )
+            self.__V         = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
             self.shape       = self.__V.shape
             self.size        = self.__V.size
         elif __Series is not None:
             self.__is_series  = True
             if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
-                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
-                if isinstance(__Series, str): __Series = eval(__Series)
+                #~ self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
+                self.__V = Persistence.OneVector(self.__name)
+                if isinstance(__Series, str):
+                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                 for member in __Series:
-                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
+                    if isinstance(member, str):
+                        member = PlatformInfo.strvect2liststr( member )
+                    self.__V.store(numpy.asarray( member, dtype=float ))
             else:
                 self.__V = __Series
             if isinstance(self.__V.shape, (tuple, list)):
@@ -1825,7 +1871,7 @@ class Covariance(object):
         #
         if __Scalar is not None:
             if isinstance(__Scalar, str):
-                __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
+                __Scalar = PlatformInfo.strvect2liststr( __Scalar )
                 if len(__Scalar) > 0: __Scalar = __Scalar[0]
             if numpy.array(__Scalar).size != 1:
                 raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
@@ -1835,9 +1881,9 @@ class Covariance(object):
             self.size        = 0
         elif __Vector is not None:
             if isinstance(__Vector, str):
-                __Vector = __Vector.replace(";"," ").replace(","," ").split()
+                __Vector = PlatformInfo.strvect2liststr( __Vector )
             self.__is_vector = True
-            self.__C         = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
+            self.__C         = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
             self.shape       = (self.__C.size,self.__C.size)
             self.size        = self.__C.size**2
         elif __Matrix is not None:
@@ -2019,14 +2065,14 @@ class Covariance(object):
     def asfullmatrix(self, msize=None):
         "Matrice pleine"
         if   self.ismatrix():
-            return numpy.asarray(self.__C)
+            return numpy.asarray(self.__C, dtype=float)
         elif self.isvector():
-            return numpy.asarray( numpy.diag(self.__C), float )
+            return numpy.asarray( numpy.diag(self.__C), dtype=float )
         elif self.isscalar():
             if msize is None:
                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
             else:
-                return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
+                return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
             return self.__C.asfullmatrix()
         else:
@@ -2182,6 +2228,8 @@ class Covariance(object):
                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
         elif self.isscalar() and isinstance(other,numpy.matrix):
             return other * self.__C
+        elif self.isscalar() and isinstance(other,float):
+            return other * self.__C
         elif self.isobject():
             return self.__C.__rmul__(other)
         else:
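
Two hedged sketches for the helpers this file now relies on. First, a usage
sketch of the PartialAlgorithm storage mimic added above (assuming it is
imported from daCore.BasicObjects, as NumericObjects.py does below):

    from daCore.BasicObjects import PartialAlgorithm
    algo = PartialAlgorithm("Test")
    algo.StoredVariables["Analysis"].store( [0.5, 0.5, 0.5] )
    print(algo.get("analysis")[-1])           # keys are matched case-insensitively

Second, the PlatformInfo.strvect2liststr helper is defined in PlatformInfo.py,
whose hunks are not shown here; judging from the inline code it replaces (the
.replace(";"," ").replace(","," ").split() chain above), it presumably behaves
like:

    def strvect2liststr(strvect):
        "Split a user string like '0.5 ; 0.5 , 0.5' into a list of tokens"
        for separator in (";", ","):
            strvect = strvect.replace(separator, " ")
        return strvect.split()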
index 58d723e6f6ae230a3ae4891a12efe63c702e7ee4..609ac9686f4d1842c417b379e95cd1b763708de6 100644 (file)
@@ -27,7 +27,7 @@ __author__ = "Jean-Philippe ARGAUD"
 
 import os, time, copy, types, sys, logging
 import math, numpy, scipy, scipy.optimize, scipy.version
-from daCore.BasicObjects import Operator
+from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
 from daCore.PlatformInfo import PlatformInfo
 mpr = PlatformInfo().MachinePrecision()
 mfp = PlatformInfo().MaximumPrecision()
@@ -486,12 +486,12 @@ def EnsembleOfCenteredPerturbations( _bgcenter, _bgcovariance, _nbmembers ):
         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
     #
     if _bgcovariance is None:
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     else:
         _Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
     #
-    return BackgroundEnsemble
+    return _Perturbations
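
Apart from the renaming of BackgroundEnsemble to _Perturbations, the draw itself is unchanged; a minimal numpy sketch with toy sizes::

    import numpy

    _bgcenter, _bgcovariance, _nbmembers = numpy.zeros((4, 1)), numpy.eye(4), 10
    _Z = numpy.random.multivariate_normal(
        numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
    _Perturbations = numpy.tile(_bgcenter, _nbmembers) + _Z   # shape (4, 10)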
 
 # ==============================================================================
 def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
@@ -513,29 +513,29 @@ def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _wi
     if _nbmembers < 2:
         raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
     if _bgcovariance is None:
-        BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+        _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     else:
         if _withSVD:
-            U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+            _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
             _nbctl = _bgcenter.size
             if _nbmembers > _nbctl:
                 _Z = numpy.concatenate((numpy.dot(
-                    numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
+                    numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
                     numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
             else:
-                _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
+                _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
             _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-            BackgroundEnsemble = _bgcenter + _Zca
+            _Perturbations = _bgcenter + _Zca
         else:
             if max(abs(_bgcovariance.flatten())) > 0:
                 _nbctl = _bgcenter.size
                 _Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
                 _Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
-                BackgroundEnsemble = _bgcenter + _Zca
+                _Perturbations = _bgcenter + _Zca
             else:
-                BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+                _Perturbations = numpy.tile( _bgcenter, _nbmembers)
     #
-    return BackgroundEnsemble
+    return _Perturbations
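
A sketch of the SVD branch for the usual case _nbmembers <= _nbctl, with a toy diagonal covariance; __CenteredRandomAnomalies belongs to the module and is not reproduced here::

    import numpy

    _bgcovariance, _nbmembers = numpy.diag([1., 4., 9.]), 3
    _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
    _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
    # _Z holds the _nbmembers-1 leading anomaly directions, scaled by sqrt(_s)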
 
 # ==============================================================================
 def EnsembleMean( __Ensemble ):
@@ -799,7 +799,7 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True):
     if not isinstance(__Bounds, numpy.ndarray): # Must be an array
         raise ValueError("Incorrect array definition of bounds data")
     if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
-        raise ValueError("Incorrect bounds number to be applied for this vector")
+        raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
     if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
         raise ValueError("Incorrectly shaped bounds data")
     #
@@ -815,6 +815,31 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True):
     #
     return __Vector
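
The sharpened message reports both sizes. For reference, bounds are expected as an n x 2 array of (min, max) rows, one per component; a hypothetical conforming call::

    import numpy

    __Vector = numpy.array([0.2, 1.5, -3.0])
    __Bounds = numpy.array([[0., 1.], [0., 1.], [-1., 1.]])
    assert 2 * __Vector.size == __Bounds.size            # the checked invariant
    clipped = numpy.clip(__Vector, __Bounds[:, 0], __Bounds[:, 1])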
 
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+    "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+    #
+    Xf = EnsembleMean( __EnXf )
+    Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
+    Pf = (1 - __Betaf) * __B + __Betaf * Pf
+    #
+    selfB = PartialAlgorithm("3DVAR")
+    selfB._parameters["Minimizer"] = "LBFGSB"
+    selfB._parameters["MaximumNumberOfSteps"] = 15000
+    selfB._parameters["CostDecrementTolerance"] = 1.e-7
+    selfB._parameters["ProjectedGradientTolerance"] = -1
+    selfB._parameters["GradientNormTolerance"] = 1.e-05
+    selfB._parameters["StoreInternalVariables"] = False
+    selfB._parameters["optiprint"] = -1
+    selfB._parameters["optdisp"] = 0
+    selfB._parameters["Bounds"] = None
+    selfB._parameters["InitializationPoint"] = Xf
+    std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+    Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+    del selfB
+    #
+    return Xa + EnsembleOfAnomalies( __EnXn )
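
The helper mixes the static and ensemble covariances as Pf = (1 - Betaf) * B + Betaf * Pf, runs a 3DVAR analysis with the result, then translates the ensemble so that its mean becomes that analysis. A numpy-only sketch of the final recentring step, with toy values standing in for the 3DVAR output::

    import numpy

    __EnXn = numpy.random.rand(4, 10)                    # toy ensemble (n x m)
    Xa = numpy.ones((4, 1))                              # stand-in 3DVAR analysis
    anomalies = __EnXn - __EnXn.mean(axis=1, keepdims=True)
    recentred = Xa + anomalies                           # column mean is now Xa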
+
 # ==============================================================================
 def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     """
@@ -926,7 +951,7 @@ def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm * Un
+                    XEtnnpi = XEtnnpi + Cm @ Un
                 if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
                     XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
             elif selfA._parameters["EstimationOf"] == "Parameters":
@@ -976,7 +1001,7 @@ def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         _Innovation  = Ynpu - Yncm.reshape((-1,1))
         if selfA._parameters["EstimationOf"] == "Parameters":
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pxyn * Pyyn.I
         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
@@ -1020,7 +1045,7 @@ def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
@@ -1160,7 +1185,7 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
             Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
@@ -1177,7 +1202,7 @@ def cekf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
@@ -1353,7 +1378,7 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm
                     returnSerieAsArrayMatrix = True )
                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                     Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                    EZ = EZ + Cm * Un
+                    EZ = EZ + Cm @ Un
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 # --- > Par principe, M = Id, Q = 0
                 EZ = H( [(EL[:,i], Un) for i in range(__m)],
@@ -1409,7 +1434,10 @@ def enks(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterForm
     return 0
 
 # ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula",
+    Hybrid=None,
+    ):
     """
     Ensemble-Transform EnKF
     """
@@ -1499,7 +1527,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -1508,8 +1536,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
         #
         # Anomalies
         EaX   = EnsembleOfAnomalies( Xn_predicted, Xfm )
@@ -1597,7 +1625,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
             def CostFunction(w):
                 _A  = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
-                _Jo = 0.5 * _A.T * RI * _A
+                _Jo = 0.5 * _A.T * (RI * _A)
                 _Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
                 _J  = _Jo + _Jb
                 return float(_J)
@@ -1668,7 +1696,11 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -1682,7 +1714,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -1714,8 +1746,8 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -1853,7 +1885,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
             Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
@@ -1867,7 +1899,7 @@ def exkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
@@ -2021,11 +2053,11 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -2100,7 +2132,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -2114,7 +2146,7 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -2146,8 +2178,8 @@ def ienkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="IEnKF12",
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -2211,7 +2243,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     #
     Xini = selfA._parameters["InitializationPoint"]
     #
-    HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+    HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
     Innovation = Y - HXb
     #
     # Outer Loop
@@ -2230,13 +2262,13 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         # Définition de la fonction-coût
         # ------------------------------
         def CostFunction(dx):
-            _dX  = numpy.asmatrix(numpy.ravel( dx )).T
+            _dX  = numpy.ravel( dx ).reshape((-1,1))
             if selfA._parameters["StoreInternalVariables"] or \
                 selfA._toStore("CurrentState") or \
                 selfA._toStore("CurrentOptimum"):
                 selfA.StoredVariables["CurrentState"].store( Xb + _dX )
-            _HdX = Ht * _dX
-            _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+            _HdX = Ht @ _dX
+            _HdX = numpy.ravel( _HdX ).reshape((-1,1))
             _dInnovation = Innovation - _HdX
             if selfA._toStore("SimulatedObservationAtCurrentState") or \
                 selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -2244,8 +2276,8 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             if selfA._toStore("InnovationAtCurrentState"):
                 selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
             #
-            Jb  = float( 0.5 * _dX.T * BI * _dX )
-            Jo  = float( 0.5 * _dInnovation.T * RI * _dInnovation )
+            Jb  = float( 0.5 * _dX.T * (BI * _dX) )
+            Jo  = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
             J   = Jb + Jo
             #
             selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -2274,11 +2306,11 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             return J
         #
         def GradientOfCostFunction(dx):
-            _dX          = numpy.asmatrix(numpy.ravel( dx )).T
-            _HdX         = Ht * _dX
-            _HdX         = numpy.asmatrix(numpy.ravel( _HdX )).T
+            _dX          = numpy.ravel( dx )
+            _HdX         = Ht @ _dX
+            _HdX         = numpy.ravel( _HdX ).reshape((-1,1))
             _dInnovation = Innovation - _HdX
-            GradJb       = BI * _dX
+            GradJb       = BI @ _dX
             GradJo       = - Ht.T @ (RI * _dInnovation)
             GradJ        = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
             return GradJ
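
With numpy.matrix removed, '@' now carries the matrix products in the gradient. A toy evaluation of GradJ = BI dx - Ht' RI (d - Ht dx), with plain ndarrays standing in for the operators::

    import numpy

    Ht = numpy.array([[1., 0.], [0., 1.], [1., 1.]])     # tangent of H
    BI, RI = numpy.eye(2), numpy.eye(3)                  # inverse covariances
    Innovation = numpy.array([[0.1], [0.2], [0.3]])
    _dX = numpy.zeros((2, 1))
    _dInnovation = Innovation - Ht @ _dX
    GradJ = numpy.ravel(BI @ _dX) + numpy.ravel(- Ht.T @ (RI @ _dInnovation))
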
@@ -2435,8 +2467,10 @@ def incr3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     return 0
 
 # ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
-    BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+    Hybrid=None,
+    ):
     """
     Maximum Likelihood Ensemble Filter
     """
@@ -2498,11 +2532,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -2519,7 +2553,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -2577,7 +2611,11 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -2591,7 +2629,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -2623,8 +2661,8 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -2719,7 +2757,7 @@ def mmqr(
         iteration += 1
         #
         Derivees  = numpy.array(fprime(variables))
-        Derivees  = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
+        Derivees  = Derivees.reshape(n,p) # ADAO & check shape
         DeriveesT = Derivees.transpose()
         M         =   numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
         SM        =   numpy.transpose(numpy.dot( DeriveesT , veps ))
@@ -2760,7 +2798,11 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
     # Initialisation
     # --------------
     if selfA._parameters["EstimationOf"] == "State":
-        M = EM["Direct"].appliedTo
+        M = EM["Direct"].appliedControledFormTo
+        if CM is not None and "Tangent" in CM and U is not None:
+            Cm = CM["Tangent"].asMatrix(Xb)
+        else:
+            Cm = None
         #
         if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
             Xn = numpy.ravel(Xb).reshape((-1,1))
@@ -2789,16 +2831,29 @@ def multi3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle):
         else:
             Ynpu = numpy.ravel( Y ).reshape((-1,1))
         #
+        if U is not None:
+            if hasattr(U,"store") and len(U)>1:
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
+            elif hasattr(U,"store") and len(U)==1:
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
+            else:
+                Un = numpy.ravel( U ).reshape((-1,1))
+        else:
+            Un = None
+        #
         if selfA._parameters["EstimationOf"] == "State": # Forecast
-            Xn_predicted = M( Xn )
+            Xn_predicted = M( (Xn, Un) )
             if selfA._toStore("ForecastState"):
                 selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+            if Cm is not None and Un is not None: # Caution: if Cm is also included in M, this term is duplicated!
+                Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = Xn
         Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
         #
-        oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+        oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
         #
         Xn = selfA.StoredVariables["Analysis"][-1]
         #--------------------------
@@ -2820,7 +2875,7 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
     else:
         HXb = Hm( Xb )
-    HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+    HXb = numpy.ravel( HXb ).reshape((-1,1))
     if Y.size != HXb.size:
         raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
     if max(Y.shape) != max(HXb.shape):
@@ -3039,7 +3094,10 @@ def psas3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     return 0
 
 # ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+    VariantM="KalmanFilterFormula16",
+    Hybrid=None,
+    ):
     """
     Stochastic EnKF
     """
@@ -3103,11 +3161,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
@@ -3127,7 +3185,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 returnSerieAsArrayMatrix = True )
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
             Xn_predicted = EMX = Xn
@@ -3136,8 +3194,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 returnSerieAsArrayMatrix = True )
         #
         # Mean of forecast and observation of forecast
-        Xfm  = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
-        Hfm  = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+        Xfm  = EnsembleMean( Xn_predicted )
+        Hfm  = EnsembleMean( HX_predicted )
         #
         #--------------------------
         if VariantM == "KalmanFilterFormula05":
@@ -3177,7 +3235,11 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
                 selfA._parameters["InflationFactor"],
                 )
         #
-        Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+        if Hybrid == "E3DVAR":
+            betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+            Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+        #
+        Xa = EnsembleMean( Xn )
         #--------------------------
         selfA._setInternalState("Xn", Xn)
         selfA._setInternalState("seed", numpy.random.get_state())
@@ -3191,7 +3253,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             or selfA._toStore("InnovationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
             or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
-            _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+            _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
             _Innovation = Ynpu - _HXa
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
@@ -3223,8 +3285,8 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -3524,18 +3586,18 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     def Un(_step):
         if U is not None:
             if hasattr(U,"store") and 1<=_step<len(U) :
-                _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+                _Un = numpy.ravel( U[_step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                _Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                _Un = numpy.asmatrix(numpy.ravel( U )).T
+                _Un = numpy.ravel( U ).reshape((-1,1))
         else:
             _Un = None
         return _Un
     def CmUn(_xn,_un):
         if Cm is not None and _un is not None: # Attention : si Cm est aussi dans M, doublon !
             _Cm   = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
-            _CmUn = _Cm * _un
+            _CmUn = (_Cm @ _un).reshape((-1,1))
         else:
             _CmUn = 0.
         return _CmUn
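
On plain ndarrays '*' is elementwise, which is why every control term now uses '@' to keep the intended matrix-vector product; a sketch with toy shapes::

    import numpy

    _Cm = numpy.array([[1., 0.], [0., 2.], [1., 1.]])    # (n x u) control operator
    _un = numpy.array([[0.5], [2.0]])                    # (u x 1) control vector
    _CmUn = (_Cm @ _un).reshape((-1, 1))                 # (n x 1) forecast increment
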
@@ -3563,26 +3625,26 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     selfA.DirectCalculation = [None,] # Le pas 0 n'est pas observé
     selfA.DirectInnovation  = [None,] # Le pas 0 n'est pas observé
     def CostFunction(x):
-        _X  = numpy.asmatrix(numpy.ravel( x )).T
+        _X  = numpy.ravel( x ).reshape((-1,1))
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
-        Jb  = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+        Jb  = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
         selfA.DirectCalculation = [None,]
         selfA.DirectInnovation  = [None,]
         Jo  = 0.
         _Xn = _X
         for step in range(0,duration-1):
             if hasattr(Y,"store"):
-                _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+                _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
             else:
-                _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+                _Ynpu = numpy.ravel( Y ).reshape((-1,1))
             _Un = Un(step)
             #
             # Etape d'évolution
             if selfA._parameters["EstimationOf"] == "State":
-                _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+                _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 pass
             #
@@ -3591,16 +3653,16 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             #
             # Etape de différence aux observations
             if selfA._parameters["EstimationOf"] == "State":
-                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
             elif selfA._parameters["EstimationOf"] == "Parameters":
-                _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+                _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
             #
             # Stockage de l'état
             selfA.DirectCalculation.append( _Xn )
             selfA.DirectInnovation.append( _YmHMX )
             #
             # Ajout dans la fonctionnelle d'observation
-            Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+            Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
         J = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
@@ -3626,7 +3688,7 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         return J
     #
     def GradientOfCostFunction(x):
-        _X      = numpy.asmatrix(numpy.ravel( x )).T
+        _X      = numpy.ravel( x ).reshape((-1,1))
         GradJb  = BI * (_X - Xb)
         GradJo  = 0.
         for step in range(duration-1,0,-1):
@@ -3805,19 +3867,19 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         #
         if U is not None:
             if hasattr(U,"store") and len(U)>1:
-                Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+                Un = numpy.ravel( U[step] ).reshape((-1,1))
             elif hasattr(U,"store") and len(U)==1:
-                Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+                Un = numpy.ravel( U[0] ).reshape((-1,1))
             else:
-                Un = numpy.asmatrix(numpy.ravel( U )).T
+                Un = numpy.ravel( U ).reshape((-1,1))
         else:
             Un = None
         #
         if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
-            Xn_predicted = Mt * Xn
+            Xn_predicted = Mt @ Xn
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                 Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
-                Xn_predicted = Xn_predicted + Cm * Un
+                Xn_predicted = Xn_predicted + Cm @ Un
             Pn_predicted = Q + Mt * (Pn * Ma)
         elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
             # --- > Par principe, M = Id, Q = 0
@@ -3825,13 +3887,13 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             Pn_predicted = Pn
         #
         if selfA._parameters["EstimationOf"] == "State":
-            HX_predicted = Ht * Xn_predicted
+            HX_predicted = Ht @ Xn_predicted
             _Innovation  = Ynpu - HX_predicted
         elif selfA._parameters["EstimationOf"] == "Parameters":
-            HX_predicted = Ht * Xn_predicted
+            HX_predicted = Ht @ Xn_predicted
             _Innovation  = Ynpu - HX_predicted
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
         Xn = Xn_predicted + Kn * _Innovation
@@ -3872,8 +3934,8 @@ def stdkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
-            Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+            Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
             selfA.StoredVariables["CostFunctionJo"].store( Jo )
@@ -4025,7 +4087,7 @@ def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
                 XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                 if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
                     Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
-                    XEtnnpi = XEtnnpi + Cm * Un
+                    XEtnnpi = XEtnnpi + Cm @ Un
             elif selfA._parameters["EstimationOf"] == "Parameters":
                 # --- > Par principe, M = Id, Q = 0
                 XEtnnpi = Xnp[:,point]
@@ -4063,7 +4125,7 @@ def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         _Innovation  = Ynpu - Yncm.reshape((-1,1))
         if selfA._parameters["EstimationOf"] == "Parameters":
             if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
-                _Innovation = _Innovation - Cm * Un
+                _Innovation = _Innovation - Cm @ Un
         #
         Kn = Pxyn * Pyyn.I
         Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
@@ -4104,7 +4166,7 @@ def uskf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
             or selfA._toStore("CostFunctionJo") \
             or selfA._toStore("CurrentOptimum") \
             or selfA._toStore("APosterioriCovariance"):
-            Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+            Jb  = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
             Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
             J   = Jb + Jo
             selfA.StoredVariables["CostFunctionJb"].store( Jb )
@@ -4170,14 +4232,14 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
     # Définition de la fonction-coût
     # ------------------------------
     def CostFunction(v):
-        _V = numpy.asmatrix(numpy.ravel( v )).T
+        _V = numpy.ravel( v ).reshape((-1,1))
         _X = Xb + B * _V
         if selfA._parameters["StoreInternalVariables"] or \
             selfA._toStore("CurrentState") or \
             selfA._toStore("CurrentOptimum"):
             selfA.StoredVariables["CurrentState"].store( _X )
         _HX = Hm( _X )
-        _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+        _HX = numpy.ravel( _HX ).reshape((-1,1))
         _Innovation = Y - _HX
         if selfA._toStore("SimulatedObservationAtCurrentState") or \
             selfA._toStore("SimulatedObservationAtCurrentOptimum"):
@@ -4185,8 +4247,8 @@ def van3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
         if selfA._toStore("InnovationAtCurrentState"):
             selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
         #
-        Jb  = float( 0.5 * _V.T * BT * _V )
-        Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
+        Jb  = float( 0.5 * _V.T * (BT * _V) )
+        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
         J   = Jb + Jo
         #
         selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
index 9101abe8ebf89f121e8c528be6e508d40ebd903f..533e729ee408a06a6097c4d141801152d0aaf72d 100644 (file)
@@ -49,6 +49,7 @@ import sys
 import platform
 import locale
 import logging
+import re
 
 # ==============================================================================
 class PlatformInfo(object):
@@ -293,6 +294,34 @@ def date2int( __date, __lang="FR" ):
         raise ValueError("Cannot convert \"%s\" as a D/M/Y H:M date"%d)
     return __number
 
+def strvect2liststr( __strvect ):
+    """
+    Fallback function converting the string representation of a vector
+    into a list of strings, each one representing a float
+    """
+    for s in ("array", "matrix", "list", "tuple", "[", "]", "(", ")"):
+        __strvect = __strvect.replace(s,"")  # Removed
+    for s in (",", ";"):
+        __strvect = __strvect.replace(s," ") # Replaced by a blank
+    return __strvect.split()
+
+def strmatrix2liststr( __strvect ):
+    """
+    Fallback function converting the string representation of a matrix
+    into a list of lists of strings, each one representing a float
+    """
+    for s in ("array", "matrix", "list", "tuple", "[", "(", "'", '"'):
+        __strvect = __strvect.replace(s,"")  # Removed
+    __strvect = __strvect.replace(","," ") # Replaced by a blank
+    for s in ("]", ")"):
+        __strvect = __strvect.replace(s,";") # "]" and ")" replaced by ";"
+    __strvect = re.sub(r';\s*;', ';', __strvect)
+    __strvect = __strvect.rstrip(";") # Strip after the substitutions above, before the split below
+    __strmat = [l.split() for l in __strvect.split(";")]
+    return __strmat
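
A quick round-trip through the two fallback parsers defined above, with illustrative inputs (expected results shown as comments)::

    from daCore.PlatformInfo import strvect2liststr, strmatrix2liststr

    strvect2liststr("array([1, 2; 3])")                  # -> ['1', '2', '3']
    strmatrix2liststr("[[1,2],[3,4]]")                   # -> [['1', '2'], ['3', '4']]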
+
 def checkFileNameConformity( __filename, __warnInsteadOfPrint=True ):
     if sys.platform.startswith("win") and len(__filename) > 256:
         __conform = False