After that point, all the modifications, executions and post-processing of the
data assimilation scheme will be done in the YACS module. In order to check the
-result in a simple way, we use the "*UserPostAnalysis*" node (or we create here
+result in a simple way, we can use the "*UserPostAnalysis*" node (or we create
a new YACS node by using the "*in-line script node*" sub-menu of the YACS
graphical view).
**YACS menu for Container Log, and dialog window showing the log**
We verify that the result is correct by checking that the log dialog window
-contains the following line:
+contains information similar to the following line:
::
    Analysis = [0.5, 0.5, 0.5]
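For reference, the body of such a post-processing node can stay very small.
The following sketch, which only assumes that the ADAO study object "*ADD*" is
available in the node, as in the "*UserPostAnalysis*" examples below, prints
the analysis in the same form as above:
::

    import numpy
    Xa = ADD.get("Analysis")[-1]
    print("Analysis = [%s]"%(", ".join(["%.1f"%v for v in numpy.ravel(Xa)])))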
After this point, all the modifications, executions and post-processing of the
data assimilation scheme will be done in the YACS module. In order to check
-the results in a simple way, we use the "*UserPostAnalysis*" node (or we
-create here a new YACS node through the "*in-line script node*" sub-menu of
-the YACS graphical view).
+the results in a simple way, we can use the "*UserPostAnalysis*" node (or we
+create a new YACS node through the "*in-line script node*" sub-menu of the
+YACS graphical view).
This script node will retrieve the analysis resulting from the data assimilation
through the "*algoResults*" output port of the calculation block (which gives access to
**YACS menu for the output window, and dialog box showing the output**
We check that the result is correct by observing whether the output window
-contains the following line:
+contains information similar to the following line:
::
    Analysis = [0.5, 0.5, 0.5]
data=_F(FROM='ScriptWithSwitch',
SCRIPTWITHSWITCH_FILE='test005_ADAO_scripts_for_JDC.py',),),
UserPostAnalysis=_F(FROM='String',
- STRING=
+ STRING=\
"""import numpy
Xb = ADD.get("Background")
Xa = ADD.get("Analysis")[-1]
print()
-print("Size of Background...........= %i"%len(Xb.A1))
+print("Size of Background...........= %i"%len(Xb))
print("Size of Analysis.............= %i"%len(Xa))
print("Min, mean, max of Analysis...= %8.3f, %8.3f, %8.3f"%(min(Xa),numpy.mean(Xa),max(Xa)))
print()
""",),);
-#VERSION_CATALOGUE:V8_3_0:FIN VERSION_CATALOGUE
+#VERSION_CATALOGUE:V9_8_0:FIN VERSION_CATALOGUE
-# -*- coding: utf-8 -*-
+#-*- coding: utf-8 -*-
study_config = {}
study_config['StudyType'] = 'ASSIMILATION_STUDY'
study_config['Name'] = 'Test'
outputvariables_config['adao_default'] = -1
study_config['OutputVariables'] = outputvariables_config
study_config['Repertory'] = '@prefix@/share/adao_examples/daSalome'
+study_config['ExecuteInContainer'] = 'No'
Analysis_config = {}
Analysis_config['From'] = 'String'
Analysis_config['Data'] = """import numpy
Xb = ADD.get("Background")
Xa = ADD.get("Analysis")[-1]
print()
-print("Size of Background...........= %i"%len(Xb.A1))
+print("Size of Background...........= %i"%len(Xb))
print("Size of Analysis.............= %i"%len(Xa))
print("Min, mean, max of Analysis...= %8.3f, %8.3f, %8.3f"%(min(Xa),numpy.mean(Xa),max(Xa)))
print()
#
# Verifying the results by printing
# ---------------------------------
-print("")
-print("obs = [%s]"%(", ".join(["%.4f"%v for v in ADD.get("Observation").A1])))
-print("")
-print("xb = [%s]"%(", ".join(["%.4f"%v for v in ADD.get("Background").A1])))
+print()
+print("obs = [%s]"%(", ".join(["%.4f"%v for v in numpy.ravel(ADD.get("Observation"))])))
+print()
+print("xb = [%s]"%(", ".join(["%.4f"%v for v in numpy.ravel(ADD.get("Background"))])))
print("xt = [%s]"%(", ".join(["%.4f"%v for v in numpy.array(xt)])))
print("xa = [%s]"%(", ".join(["%.4f"%v for v in numpy.array(xa)])))
-print("")
+print()
for i in range( len(x_series) ):
- print("Step %2i : J = %.4e X = [%s]"%(i, J[i], ", ".join(["%.4f"%v for v in x_series[i]])))
-print("")
+ print("Step %2i : J = %.4e et X = [%s]"%(i, J[i], ", ".join(["%.4f"%v for v in x_series[i]])))
+print()
#
# ==============================================================================
Ha = HO["Adjoint"].asMatrix(Xb)
Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
- # ----------------------------------------------------
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
HXb = HO["AppliedInX"]["HXb"]
else:
- HXb = Hm * Xb
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ HXb = Hm @ Xb
+ HXb = HXb.reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
#
- # Précalcul des inversions de B et R
- # ----------------------------------
BI = B.getI()
RI = R.getI()
#
- # Calcul de l'innovation
- # ----------------------
- d = Y - HXb
+ Innovation = Y - HXb
#
# Calcul de la matrice de gain et de l'analyse
# --------------------------------------------
if Y.size <= Xb.size:
_A = R + numpy.dot(Hm, B * Ha)
- _u = numpy.linalg.solve( _A , d )
+ _u = numpy.linalg.solve( _A , Innovation )
Xa = Xb + B * Ha * _u
else:
_A = BI + numpy.dot(Ha, RI * Hm)
- _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * d) )
+ _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
Xa = Xb + _u
- self.StoredVariables["Analysis"].store( Xa.A1 )
+ self.StoredVariables["Analysis"].store( Xa )
#
# Calcul de la fonction coût
# --------------------------
self._toStore("SimulatedObservationAtCurrentState") or \
self._toStore("SimulatedObservationAtOptimum") or \
self._toStore("SimulationQuantiles"):
- HXa = Hm * Xa
+ HXa = Hm @ Xa
oma = Y - HXa
if self._parameters["StoreInternalVariables"] or \
self._toStore("CostFunctionJ") or self._toStore("CostFunctionJAtCurrentOptimum") or \
self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
self._toStore("MahalanobisConsistency"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * oma.T * RI * oma )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * oma.T * (RI * oma) )
J = Jb + Jo
self.StoredVariables["CostFunctionJb"].store( Jb )
self.StoredVariables["CostFunctionJo"].store( Jo )
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
- self.StoredVariables["CurrentState"].store( numpy.ravel(Xa) )
+ self.StoredVariables["CurrentState"].store( Xa )
if self._toStore("CurrentOptimum"):
- self.StoredVariables["CurrentOptimum"].store( numpy.ravel(Xa) )
+ self.StoredVariables["CurrentOptimum"].store( Xa )
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ self.StoredVariables["Innovation"].store( Innovation )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
- self.StoredVariables["OMA"].store( numpy.ravel(oma) )
+ self.StoredVariables["OMA"].store( oma )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ self.StoredVariables["OMB"].store( Innovation )
if self._toStore("SigmaObs2"):
TraceR = R.trace(Y.size)
- self.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(oma)).T)) ) / TraceR )
+ self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
if self._toStore("SigmaBck2"):
- self.StoredVariables["SigmaBck2"].store( float( (d.T * Hm * (Xa - Xb))/(Hm * B * Hm.T).trace() ) )
+ self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
if self._toStore("MahalanobisConsistency"):
- self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/d.size ) )
+ self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
if self._toStore("SimulationQuantiles"):
H = HO["Direct"].appliedTo
NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, Hm)
if self._toStore("SimulatedObservationAtBackground"):
- self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtCurrentState"):
- self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
if self._toStore("SimulatedObservationAtCurrentOptimum"):
- self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
self._post_run(HO)
return 0
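#
# A standalone sketch, with plain dense NumPy arrays and toy values instead of
# ADAO objects, of the two branches above: the observation-space (primal) and
# state-space (dual) gain formulas give the same BLUE analysis
# Xa = Xb + K (Y - H Xb):
import numpy
Hm = numpy.array([[1., 0., 0.], [0., 1., 0.]])  # toy observation operator
Ha = Hm.T                                       # its adjoint (transpose)
B  = numpy.eye(3)                               # background error covariance
R  = 0.5 * numpy.eye(2)                         # observation error covariance
Xb = numpy.array([1., 1., 1.])
d  = numpy.array([0.2, -0.1])                   # innovation Y - H(Xb)
BI, RI = numpy.linalg.inv(B), numpy.linalg.inv(R)
Xa1 = Xb + B @ Ha @ numpy.linalg.solve(R + Hm @ B @ Ha, d)     # Y.size <= Xb.size
Xa2 = Xb + numpy.linalg.solve(BI + Ha @ RI @ Hm, Ha @ RI @ d)  # otherwise
assert numpy.allclose(Xa1, Xa2)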
default = "AugmentedWeightedLeastSquares",
typecast = str,
message = "Critère de qualité utilisé",
- listval = ["AugmentedWeightedLeastSquares","AWLS","DA",
- "WeightedLeastSquares","WLS",
- "LeastSquares","LS","L2",
- "AbsoluteValue","L1",
- "MaximumError","ME"],
+ listval = [
+ "AugmentedWeightedLeastSquares","AWLS","DA",
+ "WeightedLeastSquares","WLS",
+ "LeastSquares","LS","L2",
+ "AbsoluteValue","L1",
+ "MaximumError","ME",
+ ],
)
self.defineRequiredParameter(
name = "StoreInternalVariables",
if QualityMeasure in ["AugmentedWeightedLeastSquares","AWLS","DA"]:
if BI is None or RI is None:
raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
- Jb = 0.5 * (_X - Xb).T * BI * (_X - Xb)
- Jo = 0.5 * (_Innovation).T * RI * (_Innovation)
+ Jb = 0.5 * (_X - Xb).T * (BI * (_X - Xb))
+ Jo = 0.5 * _Innovation.T * (RI * _Innovation)
elif QualityMeasure in ["WeightedLeastSquares","WLS"]:
if RI is None:
raise ValueError("Observation error covariance matrix has to be properly defined!")
#
# Obtention de l'analyse
# ----------------------
- Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ Xa = numpy.ravel( Minimum )
#
- self.StoredVariables["Analysis"].store( Xa.A1 )
+ self.StoredVariables["Analysis"].store( Xa )
#
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
- if self._toStore("OMA" ) or \
+ if self._toStore("OMA") or \
self._toStore("SimulatedObservationAtOptimum"):
if self._toStore("SimulatedObservationAtCurrentState"):
HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
else:
HXa = Hm(Xa)
if self._toStore("Innovation") or \
- self._toStore("OMB"):
- d = Y - HXb
+ self._toStore("OMB") or \
+ self._toStore("SimulatedObservationAtBackground"):
+ HXb = Hm(Xb)
+ Innovation = Y - HXb
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ self.StoredVariables["Innovation"].store( Innovation )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ self.StoredVariables["OMB"].store( Innovation )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if self._toStore("SimulatedObservationAtBackground"):
- self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(Hm(Xb)) )
+ self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
self._post_run()
return 0
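#
# A small worked sketch of the "AugmentedWeightedLeastSquares" measure above,
# with toy arrays, BI and RI standing for the inverses of B and R:
import numpy
Xb = numpy.array([[1.], [1.]])
_X = numpy.array([[1.2], [0.9]])
BI = numpy.eye(2)
RI = 2. * numpy.eye(2)
_Innovation = numpy.array([[0.1], [-0.2]])
Jb = float( 0.5 * (_X - Xb).T @ (BI @ (_X - Xb)) )      # = 0.025
Jo = float( 0.5 * _Innovation.T @ (RI @ _Innovation) )  # = 0.05
J  = Jb + Jo                                            # = 0.075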
if QualityMeasure in ["AugmentedWeightedLeastSquares","AWLS","DA"]:
if BI is None or RI is None:
raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
- Jb = 0.5 * (_X - Xb).T * BI * (_X - Xb)
- Jo = 0.5 * (_Innovation).T * RI * (_Innovation)
+ Jb = 0.5 * (_X - Xb).T * (BI * (_X - Xb))
+ Jo = 0.5 * _Innovation.T * (RI * _Innovation)
elif QualityMeasure in ["WeightedLeastSquares","WLS"]:
if RI is None:
raise ValueError("Observation error covariance matrix has to be properly defined!")
#
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
- if self._toStore("OMA") or self._toStore("SimulatedObservationAtOptimum"):
+ if self._toStore("OMA") or \
+ self._toStore("SimulatedObservationAtOptimum"):
if self._toStore("SimulatedObservationAtCurrentState"):
HXa = self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif self._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = self.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
else:
HXa = Hm(Xa)
if self._toStore("Innovation") or \
- self._toStore("OMB"):
- d = Y - HXb
+ self._toStore("OMB") or \
+ self._toStore("SimulatedObservationAtBackground"):
+ HXb = Hm(Xb)
+ Innovation = Y - HXb
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ self.StoredVariables["Innovation"].store( Innovation )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ self.StoredVariables["OMB"].store( Innovation )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if self._toStore("SimulatedObservationAtBackground"):
- self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(Hm(Xb)) )
+ self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
self._post_run()
return 0
message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
listval = [
"Analysis",
+ "CurrentOptimum",
"CurrentState",
"Innovation",
"SimulatedObservationAtBackground",
#
# Initialisation des opérateurs d'observation et de la matrice gain
# -----------------------------------------------------------------
- Hm = HO["Tangent"].asMatrix(None)
- Hm = Hm.reshape(Y.size,Xb[0].size) # ADAO & check shape
- Ha = HO["Adjoint"].asMatrix(None)
- Ha = Ha.reshape(Xb[0].size,Y.size) # ADAO & check shape
+ Xbm = Xb.mean()
+ Hm = HO["Tangent"].asMatrix(Xbm)
+ Hm = Hm.reshape(Y.size,Xbm.size) # ADAO & check shape
+ Ha = HO["Adjoint"].asMatrix(Xbm)
+ Ha = Ha.reshape(Xbm.size,Y.size) # ADAO & check shape
#
# Calcul de la matrice de gain dans l'espace le plus petit et de l'analyse
# ------------------------------------------------------------------------
if Y.size <= Xb[0].size:
- K = B * Ha * (R + Hm * B * Ha).I
+ K = B * Ha * (R + Hm * (B * Ha)).I
else:
- K = (BI + Ha * RI * Hm).I * Ha * RI
+ K = (BI + Ha * (RI * Hm)).I * Ha * RI
#
# Calcul du BLUE pour chaque membre de l'ensemble
# -----------------------------------------------
for iens in range(nb_ens):
- HXb = numpy.ravel(numpy.dot(Hm, Xb[iens]))
+ HXb = Hm @ Xb[iens]
if self._toStore("SimulatedObservationAtBackground"):
self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
- d = numpy.ravel(EnsembleY[:,iens]) - HXb
+ Innovation = numpy.ravel(EnsembleY[:,iens]) - numpy.ravel(HXb)
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( d )
- Xa = numpy.ravel(Xb[iens]) + numpy.dot(K, d)
+ self.StoredVariables["Innovation"].store( Innovation )
+ Xa = Xb[iens] + K @ Innovation
self.StoredVariables["CurrentState"].store( Xa )
if self._toStore("SimulatedObservationAtCurrentState"):
- self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.dot(Hm, Xa) )
+ self.StoredVariables["SimulatedObservationAtCurrentState"].store( Hm @ numpy.ravel(Xa) )
#
# Fabrication de l'analyse
# ------------------------
Members = self.StoredVariables["CurrentState"][-nb_ens:]
Xa = numpy.array( Members ).mean(axis=0)
self.StoredVariables["Analysis"].store( Xa )
+ if self._toStore("CurrentOptimum"):
+ self.StoredVariables["CurrentOptimum"].store( Xa )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.dot(Hm, Xa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( Hm @ numpy.ravel(Xa) )
#
self._post_run(HO)
return 0
"MLEF",
"IEnKF",
"EnKS",
+ "E3DVAR",
],
listadv = [
"StochasticEnKF",
"IEnKF-B",
"EnKS-KFF",
"IEKF",
+ "E3DVAR-EnKF",
+ "E3DVAR-ETKF",
+ "E3DVAR-MLEF",
],
)
self.defineRequiredParameter(
message = "Nombre d'intervalles de temps de lissage dans le passé",
minval = 0,
)
+ self.defineRequiredParameter(
+ name = "HybridCovarianceEquilibrium",
+ default = 0.5,
+ typecast = float,
+ message = "Facteur d'équilibre entre la covariance statique et la covariance d'ensemble",
+ minval = 0.,
+ maxval = 1.,
+ )
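        # A short sketch of what this factor controls (used by the E3DVAR
        # recentring in NumericObjects below): with beta standing for the
        # "HybridCovarianceEquilibrium" value, the internal 3DVAR step runs
        # with the blended covariance
        #     Pf_hybrid = (1 - beta) * B + beta * Pf_ensemble
        # so beta = 0 keeps only the static covariance B, and beta = 1 only
        # the ensemble estimate.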
self.defineRequiredParameter(
name = "SetSeed",
typecast = numpy.random.seed,
NumericObjects.enks(self, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="EnKS16-KalmanFilterFormula")
#
#--------------------------
+ # Default E3DVAR = E3DVAR-EnKF
+ elif self._parameters["Variant"] in ["E3DVAR-EnKF", "E3DVAR"]:
+ NumericObjects.senkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+ #
+ elif self._parameters["Variant"] == "E3DVAR-ETKF":
+ NumericObjects.etkf(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+ #
+ elif self._parameters["Variant"] == "E3DVAR-MLEF":
+ NumericObjects.mlef(self, Xb, Y, U, HO, EM, CM, R, B, Q, Hybrid="E3DVAR")
+ #
+ #--------------------------
else:
raise ValueError("Error in Variant name: %s"%self._parameters["Variant"])
#
Ha = Ha.reshape(Xb.size,Y.size) # ADAO & check shape
H = HO["Direct"].appliedTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
- # ----------------------------------------------------
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
HXb = H( Xb, HO["AppliedInX"]["HXb"])
else:
HXb = H( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ HXb = HXb.reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
#
- # Précalcul des inversions de B et R
- # ----------------------------------
BI = B.getI()
RI = R.getI()
#
- # Calcul de l'innovation
- # ----------------------
- d = Y - HXb
+ Innovation = Y - HXb
#
# Calcul de la matrice de gain et de l'analyse
# --------------------------------------------
if Y.size <= Xb.size:
_A = R + numpy.dot(Hm, B * Ha)
- _u = numpy.linalg.solve( _A , d )
+ _u = numpy.linalg.solve( _A , Innovation )
Xa = Xb + B * Ha * _u
else:
_A = BI + numpy.dot(Ha, RI * Hm)
- _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * d) )
+ _u = numpy.linalg.solve( _A , numpy.dot(Ha, RI * Innovation) )
Xa = Xb + _u
- self.StoredVariables["Analysis"].store( Xa.A1 )
+ self.StoredVariables["Analysis"].store( Xa )
#
# Calcul de la fonction coût
# --------------------------
self._toStore("SimulatedObservationAtCurrentState") or \
self._toStore("SimulatedObservationAtOptimum") or \
self._toStore("SimulationQuantiles"):
- HXa = numpy.matrix(numpy.ravel( H( Xa ) )).T
+ HXa = H( Xa ).reshape((-1,1))
oma = Y - HXa
if self._parameters["StoreInternalVariables"] or \
self._toStore("CostFunctionJ") or self._toStore("CostFunctionJAtCurrentOptimum") or \
self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum") or \
self._toStore("MahalanobisConsistency"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * oma.T * RI * oma )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * oma.T * (RI * oma) )
J = Jb + Jo
self.StoredVariables["CostFunctionJb"].store( Jb )
self.StoredVariables["CostFunctionJo"].store( Jo )
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
- self.StoredVariables["CurrentState"].store( numpy.ravel(Xa) )
+ self.StoredVariables["CurrentState"].store( Xa )
if self._toStore("CurrentOptimum"):
- self.StoredVariables["CurrentOptimum"].store( numpy.ravel(Xa) )
+ self.StoredVariables["CurrentOptimum"].store( Xa )
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ self.StoredVariables["Innovation"].store( Innovation )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
- self.StoredVariables["OMA"].store( numpy.ravel(oma) )
+ self.StoredVariables["OMA"].store( oma )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ self.StoredVariables["OMB"].store( Innovation )
if self._toStore("SigmaObs2"):
TraceR = R.trace(Y.size)
- self.StoredVariables["SigmaObs2"].store( float( (d.T * (numpy.asmatrix(numpy.ravel(oma)).T)) ) / TraceR )
+ self.StoredVariables["SigmaObs2"].store( float( Innovation.T @ oma ) / TraceR )
if self._toStore("SigmaBck2"):
- self.StoredVariables["SigmaBck2"].store( float( (d.T * Hm * (Xa - Xb))/(Hm * B * Hm.T).trace() ) )
+ self.StoredVariables["SigmaBck2"].store( float( (Innovation.T @ (Hm @ (Xa - Xb)))/(Hm * (B * Hm.T)).trace() ) )
if self._toStore("MahalanobisConsistency"):
- self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/d.size ) )
+ self.StoredVariables["MahalanobisConsistency"].store( float( 2.*J/Innovation.size ) )
if self._toStore("SimulationQuantiles"):
HtM = HO["Tangent"].asMatrix(ValueForMethodForm = Xa)
HtM = HtM.reshape(Y.size,Xa.size) # ADAO & check shape
NumericObjects.QuantilesEstimations(self, A, Xa, HXa, H, HtM)
if self._toStore("SimulatedObservationAtBackground"):
- self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtCurrentState"):
- self.StoredVariables["SimulatedObservationAtCurrentState"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtCurrentState"].store( HXa )
if self._toStore("SimulatedObservationAtCurrentOptimum"):
- self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( HXa )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
self._post_run(HO)
return 0
#
# Calcul de la matrice de gain et de l'analyse
# --------------------------------------------
- K = (Ha * RI * Hm).I * Ha * RI
+ K = (Ha * (RI * Hm)).I * Ha * RI
Xa = K * Y
self.StoredVariables["Analysis"].store( Xa )
#
self._toStore("CostFunctionJb") or self._toStore("CostFunctionJbAtCurrentOptimum") or \
self._toStore("CostFunctionJo") or self._toStore("CostFunctionJoAtCurrentOptimum"):
Jb = 0.
- Jo = float( 0.5 * oma.T * RI * oma )
+ Jo = float( 0.5 * oma.T * (RI * oma) )
J = Jb + Jo
self.StoredVariables["CostFunctionJb"].store( Jb )
self.StoredVariables["CostFunctionJo"].store( Jo )
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
HXb = HO["AppliedInX"]["HXb"]
else:
- HXb = Ht * Xb
+ HXb = Ht @ Xb
HXb = numpy.asmatrix(numpy.ravel( HXb )).T
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
default = "LBFGSB",
typecast = str,
message = "Minimiseur utilisé",
- listval = ["LBFGSB","TNC", "CG", "NCG", "BFGS", "LM"],
+ listval = [
+ "LBFGSB",
+ "TNC",
+ "CG",
+ "NCG",
+ "BFGS",
+ "LM",
+ ],
)
self.defineRequiredParameter(
name = "MaximumNumberOfSteps",
)
self.defineRequiredParameter( # Pas de type
name = "Bounds",
- message = "Liste des valeurs de bornes",
+ message = "Liste des paires de bornes",
)
self.defineRequiredParameter(
name = "InitializationPoint",
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
self._pre_run(Parameters, Xb, Y, U, HO, EM, CM, R, B, Q)
#
- # Opérateurs
- # ----------
+ # Initialisations
+ # ---------------
Hm = HO["Direct"].appliedTo
Ha = HO["Adjoint"].appliedInXTo
#
- # Utilisation éventuelle d'un vecteur H(Xb) précalculé
- # ----------------------------------------------------
if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ HXb = HXb.reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
if self._parameters["Minimizer"] == "LM":
RdemiI = R.choleskyI()
#
+ Xini = self._parameters["InitializationPoint"]
+ #
# Définition de la fonction-coût
# ------------------------------
def CostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ _X = numpy.ravel( x ).reshape((-1,1))
if self._parameters["StoreInternalVariables"] or \
self._toStore("CurrentState") or \
self._toStore("CurrentOptimum"):
self.StoredVariables["CurrentState"].store( _X )
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _HX = Hm( _X ).reshape((-1,1))
_Innovation = Y - _HX
if self._toStore("SimulatedObservationAtCurrentState") or \
self._toStore("SimulatedObservationAtCurrentOptimum"):
self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
#
Jb = 0.
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
#
self.StoredVariables["CurrentIterationNumber"].store( len(self.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _X = x.reshape((-1,1))
+ _HX = Hm( _X ).reshape((-1,1))
GradJb = 0.
GradJo = - Ha( (_X, RI * (Y - _HX)) )
- GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
- return GradJ.A1
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
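    # A standalone check, with toy arrays outside of ADAO objects, of the
    # gradient formula above: for a linear operator H, i.e. Hm(x) = H @ x and
    # Ha((x, v)) = H.T @ v, the gradient of Jo = 0.5 * (Y - H x).T RI (Y - H x)
    # is -H.T @ RI @ (Y - H x), and a centered finite difference reproduces it:
    #
    #     import numpy
    #     H  = numpy.array([[1., 2.], [0., 1.], [1., 0.]])
    #     RI = numpy.eye(3)
    #     Y  = numpy.array([1., 0., 2.])
    #     x  = numpy.array([0.3, -0.2])
    #     J  = lambda z: 0.5 * (Y - H @ z) @ RI @ (Y - H @ z)
    #     g  = -H.T @ RI @ (Y - H @ x)        # as GradJo above, with GradJb = 0
    #     eps  = 1.e-6
    #     g_fd = numpy.array([(J(x + eps*e) - J(x - eps*e))/(2*eps)
    #                         for e in numpy.eye(2)])
    #     assert numpy.allclose(g, g_fd, atol=1.e-5)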
#
def CostFunctionLM(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _X = numpy.ravel( x ).reshape((-1,1))
+ _HX = Hm( _X ).reshape((-1,1))
_Innovation = Y - _HX
Jb = 0.
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
if self._parameters["StoreInternalVariables"] or \
self._toStore("CurrentState"):
return numpy.ravel( RdemiI*_Innovation )
#
def GradientOfCostFunctionLM(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
- _HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- GradJb = 0.
- GradJo = - Ha( (_X, RI * (Y - _HX)) )
- GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
+ _X = x.reshape((-1,1))
return - RdemiI*HO["Tangent"].asMatrix( _X )
#
- # Point de démarrage de l'optimisation : Xini = Xb
- # ------------------------------------
- Xini = self._parameters["InitializationPoint"]
- #
# Minimisation de la fonctionnelle
# --------------------------------
nbPreviousSteps = self.StoredVariables["CostFunctionJ"].stepnumber()
#
if self._parameters["Minimizer"] == "LBFGSB":
- # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
if "0.19" <= scipy.version.version <= "1.1.0":
import lbfgsbhlt as optimiseur
else:
if self._parameters["StoreInternalVariables"] or self._toStore("CurrentState"):
Minimum = self.StoredVariables["CurrentState"][IndexMin]
#
- # Obtention de l'analyse
- # ----------------------
- Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ Xa = Minimum
+ #--------------------------
#
self.StoredVariables["Analysis"].store( Xa )
#
else:
HXa = Hm( Xa )
#
- #
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
if self._toStore("Innovation") or \
self._toStore("OMB"):
- d = Y - HXb
+ Innovation = Y - HXb
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( d )
+ self.StoredVariables["Innovation"].store( Innovation )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( d )
+ self.StoredVariables["OMB"].store( Innovation )
if self._toStore("SimulatedObservationAtBackground"):
self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtOptimum"):
Phip = 1. - Phig
logging.debug("%s Taux de rappel au meilleur insecte du groupe (entre 0 et 1) = %s et à la meilleure position précédente (son complémentaire à 1) = %s"%(self._name, str(Phig), str(Phip)))
#
- # Opérateur d'observation
- # -----------------------
Hm = HO["Direct"].appliedTo
#
- # Précalcul des inversions de B et R
- # ----------------------------------
BI = B.getI()
RI = R.getI()
#
#
return J
#
- # Point de démarrage de l'optimisation : Xini = Xb
- # ------------------------------------
- if isinstance(Xb, type(numpy.matrix([]))):
- Xini = Xb.A1.tolist()
- elif Xb is not None:
- Xini = list(Xb)
+ if Xb is not None:
+ Xini = numpy.ravel(Xb)
else:
Xini = numpy.zeros(len(BoxBounds[:,0]))
#
- # Initialisation des bornes
- # -------------------------
SpaceUp = BoxBounds[:,1] + Xini
SpaceLow = BoxBounds[:,0] + Xini
nbparam = len(SpaceUp)
# Calculs et/ou stockages supplémentaires
# ---------------------------------------
if self._toStore("Innovation"):
- self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+ self.StoredVariables["Innovation"].store( d )
if self._toStore("BMA"):
self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
if self._toStore("OMA"):
self.StoredVariables["OMA"].store( numpy.ravel(Y) - numpy.ravel(HXa) )
if self._toStore("OMB"):
- self.StoredVariables["OMB"].store( numpy.ravel(d) )
+ self.StoredVariables["OMB"].store( d )
if self._toStore("SimulatedObservationAtBackground"):
- self.StoredVariables["SimulatedObservationAtBackground"].store( numpy.ravel(HXb) )
+ self.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
if self._toStore("SimulatedObservationAtOptimum"):
- self.StoredVariables["SimulatedObservationAtOptimum"].store( numpy.ravel(HXa) )
+ self.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
#
self._post_run(HO)
return 0
Arguments :
- name : nom d'opérateur
- fromMethod : argument de type fonction Python
- - fromMatrix : argument adapté au constructeur numpy.matrix
+ - fromMatrix : argument adapté au constructeur numpy.array/matrix
- avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
- reducingMemoryUse : booléen forçant (ou pas) des calculs moins
gourmands en mémoire
self.__Type = "Method"
elif fromMatrix is not None:
self.__Method = None
- self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
+ if isinstance(fromMatrix, str):
+ fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
+ self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
self.__Type = "Matrix"
else:
self.__Method = None
assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
_HxValue = []
for i in range(len(_HValue)):
- _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
+ _HxValue.append( _HValue[i] )
if self.__avoidRC:
Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
else:
else:
if self.__Matrix is not None:
self.__addOneMatrixCall()
- _xv = numpy.ravel(xv).reshape((-1,1))
- _hv = self.__Matrix * _xv
+ _hv = self.__Matrix @ numpy.ravel(xv)
else:
self.__addOneMethodCall()
_xserie.append( xv )
_HxValue = []
for paire in _xuValue:
_xValue, _uValue = paire
- _xValue = numpy.matrix(numpy.ravel(_xValue)).T
self.__addOneMatrixCall()
- _HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
_xuArgs = []
for paire in _xuValue:
_HxValue = []
for paire in _nxValue:
_xNominal, _xValue = paire
- _xValue = numpy.matrix(numpy.ravel(_xValue)).T
self.__addOneMatrixCall()
- _HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
self.__addOneMethodCall( len(_nxValue) )
if self.__extraArgs is None:
if argsAsSerie:
self.__addOneMethodCall( len(ValueForMethodForm) )
for _vfmf in ValueForMethodForm:
- mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
+ mValue.append( self.__Method(((_vfmf, None),)) )
else:
self.__addOneMethodCall()
mValue = self.__Method(((ValueForMethodForm, None),))
self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
elif asMatrix is not None:
- __matrice = numpy.matrix( __Matrix, numpy.float )
+ if isinstance(__Matrix, str):
+ __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
+ __matrice = numpy.asarray( __Matrix, dtype=float )
self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
else:
return __SC
+# ==============================================================================
+class PartialAlgorithm(object):
+ """
+ Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
+    action avancée comme la vérification. Pour les méthodes reprises ici,
+ le fonctionnement est identique à celles de la classe "Algorithm".
+ """
+ def __init__(self, name):
+ self._name = str( name )
+ self._parameters = {"StoreSupplementaryCalculations":[]}
+ #
+ self.StoredVariables = {}
+ self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
+ self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
+ self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
+ self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
+ self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
+ #
+ self.__canonical_stored_name = {}
+ for k in self.StoredVariables:
+ self.__canonical_stored_name[k.lower()] = k
+
+ def _toStore(self, key):
+ "True if in StoreSupplementaryCalculations, else False"
+ return key in self._parameters["StoreSupplementaryCalculations"]
+
+ def get(self, key=None):
+ """
+ Renvoie l'une des variables stockées identifiée par la clé, ou le
+ dictionnaire de l'ensemble des variables disponibles en l'absence de
+ clé. Ce sont directement les variables sous forme objet qui sont
+ renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
+ des classes de persistance.
+ """
+ if key is not None:
+ return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
+ else:
+ return self.StoredVariables
+
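# A minimal sketch of the intended use of this class, mirroring what
# Apply3DVarRecentringOnEnsemble does in NumericObjects (std3dvar and its
# arguments live in that module, so this stays illustrative):
#
#     selfB = PartialAlgorithm("3DVAR")
#     selfB._parameters["StoreInternalVariables"] = False
#     ...                       # set the other parameters the solver reads
#     std3dvar(selfB, Xf, Ynpu, None, HO, None, None, R, Pf, None)
#     Xa = selfB.get("Analysis")[-1]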
# ==============================================================================
class AlgorithmAndParameters(object):
"""
if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
if self.__algorithmName in ["EnsembleBlue",]:
asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
- self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
+ self.__Xb = Persistence.OneVector("Background")
for member in asPersistentVector:
- self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
+ self.__Xb.store( numpy.asarray(member, dtype=float) )
__Xb_shape = min(__B_shape)
else:
raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
#
if __Vector is not None:
self.__is_vector = True
- self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
+ if isinstance(__Vector, str):
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
+ self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
self.shape = self.__V.shape
self.size = self.__V.size
elif __Series is not None:
self.__is_series = True
if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
- self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
- if isinstance(__Series, str): __Series = eval(__Series)
+ #~ self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
+ self.__V = Persistence.OneVector(self.__name)
+ if isinstance(__Series, str):
+ __Series = PlatformInfo.strmatrix2liststr(__Series)
for member in __Series:
- self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
+ if isinstance(member, str):
+ member = PlatformInfo.strvect2liststr( member )
+ self.__V.store(numpy.asarray( member, dtype=float ))
else:
self.__V = __Series
if isinstance(self.__V.shape, (tuple, list)):
#
if __Scalar is not None:
if isinstance(__Scalar, str):
- __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
+ __Scalar = PlatformInfo.strvect2liststr( __Scalar )
if len(__Scalar) > 0: __Scalar = __Scalar[0]
if numpy.array(__Scalar).size != 1:
raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
self.size = 0
elif __Vector is not None:
if isinstance(__Vector, str):
- __Vector = __Vector.replace(";"," ").replace(","," ").split()
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
self.__is_vector = True
- self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
+ self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
self.shape = (self.__C.size,self.__C.size)
self.size = self.__C.size**2
elif __Matrix is not None:
def asfullmatrix(self, msize=None):
"Matrice pleine"
if self.ismatrix():
- return numpy.asarray(self.__C)
+ return numpy.asarray(self.__C, dtype=float)
elif self.isvector():
- return numpy.asarray( numpy.diag(self.__C), float )
+ return numpy.asarray( numpy.diag(self.__C), dtype=float )
elif self.isscalar():
if msize is None:
raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
else:
- return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
+ return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
return self.__C.asfullmatrix()
else:
raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return other * self.__C
+ elif self.isscalar() and isinstance(other,float):
+ return other * self.__C
elif self.isobject():
return self.__C.__rmul__(other)
else:
import os, time, copy, types, sys, logging
import math, numpy, scipy, scipy.optimize, scipy.version
-from daCore.BasicObjects import Operator
+from daCore.BasicObjects import Operator, Covariance, PartialAlgorithm
from daCore.PlatformInfo import PlatformInfo
mpr = PlatformInfo().MachinePrecision()
mfp = PlatformInfo().MaximumPrecision()
raise ValueError("Number of members has to be strictly more than 1 (given number: %s)."%(str(_nbmembers),))
#
if _bgcovariance is None:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
else:
_Z = numpy.random.multivariate_normal(numpy.zeros(_bgcenter.size), _bgcovariance, size=_nbmembers).T
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers) + _Z
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers) + _Z
#
- return BackgroundEnsemble
+ return _Perturbations
# ==============================================================================
def EnsembleOfBackgroundPerturbations( _bgcenter, _bgcovariance, _nbmembers, _withSVD = True):
if _nbmembers < 1:
        raise ValueError("Number of members has to be strictly positive (given number: %s)."%(str(_nbmembers),))
if _bgcovariance is None:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
else:
if _withSVD:
- U, s, V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
+ _U, _s, _V = numpy.linalg.svd(_bgcovariance, full_matrices=False)
_nbctl = _bgcenter.size
if _nbmembers > _nbctl:
_Z = numpy.concatenate((numpy.dot(
- numpy.diag(numpy.sqrt(s[:_nbctl])), V[:_nbctl]),
+ numpy.diag(numpy.sqrt(_s[:_nbctl])), _V[:_nbctl]),
numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1-_nbctl)), axis = 0)
else:
- _Z = numpy.dot(numpy.diag(numpy.sqrt(s[:_nbmembers-1])), V[:_nbmembers-1])
+ _Z = numpy.dot(numpy.diag(numpy.sqrt(_s[:_nbmembers-1])), _V[:_nbmembers-1])
_Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = _bgcenter + _Zca
+ _Perturbations = _bgcenter + _Zca
else:
if max(abs(_bgcovariance.flatten())) > 0:
_nbctl = _bgcenter.size
_Z = numpy.random.multivariate_normal(numpy.zeros(_nbctl),_bgcovariance,_nbmembers-1)
_Zca = __CenteredRandomAnomalies(_Z, _nbmembers)
- BackgroundEnsemble = _bgcenter + _Zca
+ _Perturbations = _bgcenter + _Zca
else:
- BackgroundEnsemble = numpy.tile( _bgcenter, _nbmembers)
+ _Perturbations = numpy.tile( _bgcenter, _nbmembers)
#
- return BackgroundEnsemble
+ return _Perturbations
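# A self-contained illustration of the construction above, with toy sizes and
# a plain centering in place of __CenteredRandomAnomalies: the ensemble is the
# background center plus zero-mean anomalies, so its mean stays on the center.
import numpy
center = numpy.array([[1.], [2.], [3.]])                 # _bgcenter as a column
Z  = numpy.random.multivariate_normal(numpy.zeros(3), numpy.eye(3), size=7).T
Zc = Z - Z.mean(axis=1, keepdims=True)                   # centered anomalies
ensemble = center + Zc                                   # one member per column
assert numpy.allclose(ensemble.mean(axis=1, keepdims=True), center)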
# ==============================================================================
def EnsembleMean( __Ensemble ):
if not isinstance(__Bounds, numpy.ndarray): # Is an array
raise ValueError("Incorrect array definition of bounds data")
    if 2*__Vector.size != __Bounds.size: # Is a 2 column array of vector length
- raise ValueError("Incorrect bounds number to be applied for this vector")
+ raise ValueError("Incorrect bounds number (%i) to be applied for this vector (of size %i)"%(__Bounds.size,__Vector.size))
if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2:
raise ValueError("Incorrectly shaped bounds data")
#
#
return __Vector
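# The body of ApplyBounds is elided here, but under the shape conventions
# checked above (__Bounds as a two-column [lower, upper] array, one row per
# vector component) a consistent component-wise clipping looks like:
import numpy
V = numpy.array([0.5, -3.0, 7.0])
Bounds = numpy.array([[0., 1.], [-1., 1.], [0., 5.]])
V_bounded = numpy.clip(V, Bounds[:, 0], Bounds[:, 1])    # -> [0.5, -1., 5.]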
+# ==============================================================================
+def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf):
+ "Recentre l'ensemble Xn autour de l'analyse 3DVAR"
+ #
+ Xf = EnsembleMean( __EnXf )
+ Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) )
+ Pf = (1 - __Betaf) * __B + __Betaf * Pf
+ #
+ selfB = PartialAlgorithm("3DVAR")
+ selfB._parameters["Minimizer"] = "LBFGSB"
+ selfB._parameters["MaximumNumberOfSteps"] = 15000
+ selfB._parameters["CostDecrementTolerance"] = 1.e-7
+ selfB._parameters["ProjectedGradientTolerance"] = -1
+ selfB._parameters["GradientNormTolerance"] = 1.e-05
+ selfB._parameters["StoreInternalVariables"] = False
+ selfB._parameters["optiprint"] = -1
+ selfB._parameters["optdisp"] = 0
+ selfB._parameters["Bounds"] = None
+ selfB._parameters["InitializationPoint"] = Xf
+ std3dvar(selfB, Xf, __Ynpu, None, __HO, None, None, __R, Pf, None)
+ Xa = selfB.get("Analysis")[-1].reshape((-1,1))
+ del selfB
+ #
+ return Xa + EnsembleOfAnomalies( __EnXn )
+
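# A small sketch of what this recentring returns, with toy arrays: the new
# ensemble keeps the anomalies (hence the spread) of __EnXn, while its mean
# is moved onto the hybrid 3DVAR analysis Xa.
import numpy
EnXn = numpy.array([[1., 2., 3.], [0., 1., 2.]])         # one member per column
Xa = numpy.array([[10.], [20.]])                         # analysis as a column
EnRecentred = Xa + (EnXn - EnXn.mean(axis=1, keepdims=True))
assert numpy.allclose(EnRecentred.mean(axis=1, keepdims=True), Xa)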
# ==============================================================================
def c2ukf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
"""
XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
- XEtnnpi = XEtnnpi + Cm * Un
+ XEtnnpi = XEtnnpi + Cm @ Un
if selfA._parameters["Bounds"] is not None and selfA._parameters["ConstrainedBy"] == "EstimateProjection":
XEtnnpi = ApplyBounds( XEtnnpi, selfA._parameters["Bounds"] )
elif selfA._parameters["EstimationOf"] == "Parameters":
_Innovation = Ynpu - Yncm.reshape((-1,1))
if selfA._parameters["EstimationOf"] == "Parameters":
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pxyn * Pyyn.I
Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- EZ = EZ + Cm * Un
+ EZ = EZ + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters":
# --- > Par principe, M = Id, Q = 0
EZ = H( [(EL[:,i], Un) for i in range(__m)],
return 0
# ==============================================================================
-def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula"):
+def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="KalmanFilterFormula",
+ Hybrid=None,
+ ):
"""
Ensemble-Transform EnKF
"""
returnSerieAsArrayMatrix = True )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = EMX = Xn
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ Xfm = EnsembleMean( Xn_predicted )
+ Hfm = EnsembleMean( HX_predicted )
#
# Anomalies
EaX = EnsembleOfAnomalies( Xn_predicted, Xfm )
HXfm = H((Xfm[:,None], Un)) # Eventuellement Hfm
def CostFunction(w):
_A = Ynpu - HXfm.reshape((__p,1)) - (EaHX @ w).reshape((__p,1))
- _Jo = 0.5 * _A.T * RI * _A
+ _Jo = 0.5 * _A.T * (RI * _A)
_Jb = 0.5 * (__m+1) * math.log(1 + 1/__m + w.T @ w)
_J = _Jo + _Jb
return float(_J)
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
Xn_predicted = numpy.ravel( M( (Xn, Un) ) ).reshape((__n,1))
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
HX_predicted = numpy.ravel( H( (Xn_predicted, Un) ) ).reshape((__p,1))
_Innovation = Ynpu - HX_predicted
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans H, doublon !
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
#
Xini = selfA._parameters["InitializationPoint"]
#
- HXb = numpy.asmatrix(numpy.ravel( Hm( Xb ) )).T
+ HXb = numpy.ravel( Hm( Xb ) ).reshape((-1,1))
Innovation = Y - HXb
#
# Outer Loop
# Définition de la fonction-coût
# ------------------------------
def CostFunction(dx):
- _dX = numpy.asmatrix(numpy.ravel( dx )).T
+ _dX = numpy.ravel( dx ).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( Xb + _dX )
- _HdX = Ht * _dX
- _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _HdX = Ht @ _dX
+ _HdX = numpy.ravel( _HdX ).reshape((-1,1))
_dInnovation = Innovation - _HdX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( _dInnovation )
#
- Jb = float( 0.5 * _dX.T * BI * _dX )
- Jo = float( 0.5 * _dInnovation.T * RI * _dInnovation )
+ Jb = float( 0.5 * _dX.T * (BI * _dX) )
+ Jo = float( 0.5 * _dInnovation.T * (RI * _dInnovation) )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
#
def GradientOfCostFunction(dx):
- _dX = numpy.asmatrix(numpy.ravel( dx )).T
- _HdX = Ht * _dX
- _HdX = numpy.asmatrix(numpy.ravel( _HdX )).T
+ _dX = numpy.ravel( dx )
+ _HdX = Ht @ _dX
+ _HdX = numpy.ravel( _HdX ).reshape((-1,1))
_dInnovation = Innovation - _HdX
- GradJb = BI * _dX
+ GradJb = BI @ _dX
GradJo = - Ht.T @ (RI * _dInnovation)
GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
return GradJ
return 0
# ==============================================================================
-def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="MLEF13",
- BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000):
+def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="MLEF13", BnotT=False, _epsilon=1.e-3, _e=1.e-7, _jmax=15000,
+ Hybrid=None,
+ ):
"""
Maximum Likelihood Ensemble Filter
"""
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
Xn_predicted = EnsemblePerturbationWithGivenCovariance( EMX, Q )
if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
# --- > Par principe, M = Id, Q = 0
Xn_predicted = EMX = Xn
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
iteration += 1
#
Derivees = numpy.array(fprime(variables))
- Derivees = Derivees.reshape(n,p) # Necessaire pour remettre en place la matrice si elle passe par des tuyaux YACS
+ Derivees = Derivees.reshape(n,p) # ADAO & check shape
DeriveesT = Derivees.transpose()
M = numpy.dot( DeriveesT , (numpy.array(numpy.matrix(p*[poids,]).T)*Derivees) )
SM = numpy.transpose(numpy.dot( DeriveesT , veps ))
# Initialisation
# --------------
if selfA._parameters["EstimationOf"] == "State":
- M = EM["Direct"].appliedTo
+ M = EM["Direct"].appliedControledFormTo
+ if CM is not None and "Tangent" in CM and U is not None:
+ Cm = CM["Tangent"].asMatrix(Xb)
+ else:
+ Cm = None
#
if len(selfA.StoredVariables["Analysis"])==0 or not selfA._parameters["nextStep"]:
Xn = numpy.ravel(Xb).reshape((-1,1))
else:
Ynpu = numpy.ravel( Y ).reshape((-1,1))
#
+ if U is not None:
+ if hasattr(U,"store") and len(U)>1:
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
+ elif hasattr(U,"store") and len(U)==1:
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
+ else:
+ Un = numpy.ravel( U ).reshape((-1,1))
+ else:
+ Un = None
+ #
if selfA._parameters["EstimationOf"] == "State": # Forecast
- Xn_predicted = M( Xn )
+ Xn_predicted = M( (Xn, Un) )
if selfA._toStore("ForecastState"):
selfA.StoredVariables["ForecastState"].store( Xn_predicted )
+ if Cm is not None and Un is not None: # Attention : si Cm est aussi dans M, doublon !
+ Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # No forecast
        # --- > By design, M = Id, Q = 0
Xn_predicted = Xn
Xn_predicted = numpy.ravel(Xn_predicted).reshape((-1,1))
#
- oneCycle(selfA, Xn_predicted, Ynpu, U, HO, None, None, R, B, None)
+ oneCycle(selfA, Xn_predicted, Ynpu, None, HO, None, None, R, B, None)
#
Xn = selfA.StoredVariables["Analysis"][-1]
#--------------------------
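With "appliedControledFormTo", the evolution operator receives a (state,
control) pair instead of the bare state, which is why the call above becomes
M( (Xn, Un) ). A hedged sketch of the calling convention (the dynamics are
purely illustrative)::

    def M(pair):
        Xn, Un = pair
        Xn1 = 0.9 * Xn            # hypothetical autonomous dynamics
        if Un is not None:
            Xn1 = Xn1 + 0.1 * Un  # hypothetical control contribution
        return Xn1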
HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
else:
HXb = Hm( Xb )
- HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ HXb = numpy.ravel( HXb ).reshape((-1,1))
if Y.size != HXb.size:
raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different, they have to be identical."%(Y.size,HXb.size))
if max(Y.shape) != max(HXb.shape):
return 0
# ==============================================================================
-def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, VariantM="KalmanFilterFormula16"):
+def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q,
+ VariantM="KalmanFilterFormula16",
+ Hybrid=None,
+ ):
"""
Stochastic EnKF
"""
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
returnSerieAsArrayMatrix = True )
        if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
        # --- > By design, M = Id, Q = 0
Xn_predicted = EMX = Xn
returnSerieAsArrayMatrix = True )
#
# Mean of forecast and observation of forecast
- Xfm = Xn_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
- Hfm = HX_predicted.mean(axis=1, dtype=mfp).astype('float').reshape((__p,1))
+ Xfm = EnsembleMean( Xn_predicted )
+ Hfm = EnsembleMean( HX_predicted )
#
#--------------------------
if VariantM == "KalmanFilterFormula05":
selfA._parameters["InflationFactor"],
)
#
- Xa = Xn.mean(axis=1, dtype=mfp).astype('float').reshape((__n,1))
+ if Hybrid == "E3DVAR":
+ betaf = selfA._parameters["HybridCovarianceEquilibrium"]
+ Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf)
+ #
+ Xa = EnsembleMean( Xn )
#--------------------------
selfA._setInternalState("Xn", Xn)
selfA._setInternalState("seed", numpy.random.get_state())
or selfA._toStore("InnovationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
- _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _HXa = numpy.ravel( H((Xa, Un)) ).reshape((-1,1))
_Innovation = Ynpu - _HXa
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
def Un(_step):
if U is not None:
if hasattr(U,"store") and 1<=_step<len(U) :
- _Un = numpy.asmatrix(numpy.ravel( U[_step] )).T
+ _Un = numpy.ravel( U[_step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- _Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ _Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- _Un = numpy.asmatrix(numpy.ravel( U )).T
+ _Un = numpy.ravel( U ).reshape((-1,1))
else:
_Un = None
return _Un
def CmUn(_xn,_un):
        if Cm is not None and _un is not None: # Caution: if Cm is also included in M, it is counted twice!
_Cm = Cm.reshape(_xn.size,_un.size) # ADAO & check shape
- _CmUn = _Cm * _un
+ _CmUn = (_Cm @ _un).reshape((-1,1))
else:
_CmUn = 0.
return _CmUn
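The systematic change from "Cm * Un" to "Cm @ Un" matters because, on 2-D
ndarrays, "*" is the elementwise (broadcast) product while "@" is the matrix
product; only the abandoned numpy.matrix type made "*" mean matrix
multiplication. A small demonstration with illustrative values::

    import numpy
    Cm = numpy.array([[1.0, 0.0], [0.0, 2.0]])  # a (2,2) control operator
    Un = numpy.array([[3.0], [4.0]])            # a (2,1) control vector
    print( (Cm @ Un).shape )  # (2, 1): matrix product, as intended
    print( (Cm * Un).shape )  # (2, 2): elementwise broadcast, wrong here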
    selfA.DirectCalculation = [None,] # Step 0 is not observed
    selfA.DirectInnovation = [None,] # Step 0 is not observed
def CostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ _X = numpy.ravel( x ).reshape((-1,1))
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
- Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ Jb = float( 0.5 * (_X - Xb).T * (BI * (_X - Xb)) )
selfA.DirectCalculation = [None,]
selfA.DirectInnovation = [None,]
Jo = 0.
_Xn = _X
for step in range(0,duration-1):
if hasattr(Y,"store"):
- _Ynpu = numpy.asmatrix(numpy.ravel( Y[step+1] )).T
+ _Ynpu = numpy.ravel( Y[step+1] ).reshape((-1,1))
else:
- _Ynpu = numpy.asmatrix(numpy.ravel( Y )).T
+ _Ynpu = numpy.ravel( Y ).reshape((-1,1))
_Un = Un(step)
#
            # Evolution step
if selfA._parameters["EstimationOf"] == "State":
- _Xn = Mm( (_Xn, _Un) ) + CmUn(_Xn, _Un)
+ _Xn = Mm( (_Xn, _Un) ).reshape((-1,1)) + CmUn(_Xn, _Un)
elif selfA._parameters["EstimationOf"] == "Parameters":
pass
#
#
            # Observation discrepancy step
if selfA._parameters["EstimationOf"] == "State":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, None) ) )).T
+ _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, None) ) ).reshape((-1,1))
elif selfA._parameters["EstimationOf"] == "Parameters":
- _YmHMX = _Ynpu - numpy.asmatrix(numpy.ravel( Hm( (_Xn, _Un) ) )).T - CmUn(_Xn, _Un)
+ _YmHMX = _Ynpu - numpy.ravel( Hm( (_Xn, _Un) ) ).reshape((-1,1)) - CmUn(_Xn, _Un)
#
            # Storage of the state
selfA.DirectCalculation.append( _Xn )
selfA.DirectInnovation.append( _YmHMX )
#
            # Addition to the observation functional
- Jo = Jo + 0.5 * float( _YmHMX.T * RI * _YmHMX )
+ Jo = Jo + 0.5 * float( _YmHMX.T * (RI * _YmHMX) )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
return J
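For reference, the quantity accumulated by this CostFunction is the usual
strong-constraint 4DVAR functional, with Jb the background term and Jo summed
over the observation steps (M is the evolution operator, H the observation
operator)::

    J(x) = 1/2 (x - xb)^T B^{-1} (x - xb)
         + 1/2 sum_k (y_k - H(M_k(x)))^T R^{-1} (y_k - H(M_k(x)))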
#
def GradientOfCostFunction(x):
- _X = numpy.asmatrix(numpy.ravel( x )).T
+ _X = numpy.ravel( x ).reshape((-1,1))
GradJb = BI * (_X - Xb)
GradJo = 0.
for step in range(duration-1,0,-1):
#
if U is not None:
if hasattr(U,"store") and len(U)>1:
- Un = numpy.asmatrix(numpy.ravel( U[step] )).T
+ Un = numpy.ravel( U[step] ).reshape((-1,1))
elif hasattr(U,"store") and len(U)==1:
- Un = numpy.asmatrix(numpy.ravel( U[0] )).T
+ Un = numpy.ravel( U[0] ).reshape((-1,1))
else:
- Un = numpy.asmatrix(numpy.ravel( U )).T
+ Un = numpy.ravel( U ).reshape((-1,1))
else:
Un = None
#
if selfA._parameters["EstimationOf"] == "State": # Forecast + Q and observation of forecast
- Xn_predicted = Mt * Xn
+ Xn_predicted = Mt @ Xn
        if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
Cm = Cm.reshape(__n,Un.size) # ADAO & check shape
- Xn_predicted = Xn_predicted + Cm * Un
+ Xn_predicted = Xn_predicted + Cm @ Un
Pn_predicted = Q + Mt * (Pn * Ma)
elif selfA._parameters["EstimationOf"] == "Parameters": # Observation of forecast
        # --- > By design, M = Id, Q = 0
Pn_predicted = Pn
#
if selfA._parameters["EstimationOf"] == "State":
- HX_predicted = Ht * Xn_predicted
+ HX_predicted = Ht @ Xn_predicted
_Innovation = Ynpu - HX_predicted
elif selfA._parameters["EstimationOf"] == "Parameters":
- HX_predicted = Ht * Xn_predicted
+ HX_predicted = Ht @ Xn_predicted
_Innovation = Ynpu - HX_predicted
            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pn_predicted * Ha * numpy.linalg.inv(R + numpy.dot(Ht, Pn_predicted * Ha))
Xn = Xn_predicted + Kn * _Innovation
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
selfA.StoredVariables["CostFunctionJo"].store( Jo )
XEtnnpi = numpy.asarray( Mm( (Xnp[:,point], Un) ) ).reshape((-1,1))
                if Cm is not None and Un is not None: # Caution: if Cm is also included in M, it is counted twice!
Cm = Cm.reshape(Xn.size,Un.size) # ADAO & check shape
- XEtnnpi = XEtnnpi + Cm * Un
+ XEtnnpi = XEtnnpi + Cm @ Un
elif selfA._parameters["EstimationOf"] == "Parameters":
                # --- > By design, M = Id, Q = 0
XEtnnpi = Xnp[:,point]
_Innovation = Ynpu - Yncm.reshape((-1,1))
if selfA._parameters["EstimationOf"] == "Parameters":
            if Cm is not None and Un is not None: # Caution: if Cm is also included in H, it is counted twice!
- _Innovation = _Innovation - Cm * Un
+ _Innovation = _Innovation - Cm @ Un
#
Kn = Pxyn * Pyyn.I
Xn = Xncm.reshape((-1,1)) + Kn * _Innovation
or selfA._toStore("CostFunctionJo") \
or selfA._toStore("CurrentOptimum") \
or selfA._toStore("APosterioriCovariance"):
- Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jb = float( 0.5 * (Xa - Xb).T * (BI * (Xa - Xb)) )
Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
selfA.StoredVariables["CostFunctionJb"].store( Jb )
    # Cost function definition
    # ------------------------
def CostFunction(v):
- _V = numpy.asmatrix(numpy.ravel( v )).T
+ _V = numpy.ravel( v ).reshape((-1,1))
_X = Xb + B * _V
if selfA._parameters["StoreInternalVariables"] or \
selfA._toStore("CurrentState") or \
selfA._toStore("CurrentOptimum"):
selfA.StoredVariables["CurrentState"].store( _X )
_HX = Hm( _X )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _HX = numpy.ravel( _HX ).reshape((-1,1))
_Innovation = Y - _HX
if selfA._toStore("SimulatedObservationAtCurrentState") or \
selfA._toStore("SimulatedObservationAtCurrentOptimum"):
if selfA._toStore("InnovationAtCurrentState"):
selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
#
- Jb = float( 0.5 * _V.T * BT * _V )
- Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ Jb = float( 0.5 * _V.T * (BT * _V) )
+ Jo = float( 0.5 * _Innovation.T * (RI * _Innovation) )
J = Jb + Jo
#
selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
import platform
import locale
import logging
+import re
# ==============================================================================
class PlatformInfo(object):
raise ValueError("Cannot convert \"%s\" as a D/M/Y H:M date"%d)
return __number
+def strvect2liststr( __strvect ):
+ """
+ Fallback function, converting the string representation of a vector
+ into a list of strings, each one representing a float
+ """
+ for s in ("array", "matrix", "list", "tuple", "[", "]", "(", ")"):
+ __strvect = __strvect.replace(s,"") # Nothing
+ for s in (",", ";"):
+ __strvect = __strvect.replace(s," ") # Blank
+ return __strvect.split()
+
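A hedged usage sketch for this helper: it only strips the container syntax,
so the caller still converts each piece to a float afterwards::

    print( strvect2liststr("array([0.5, 0.5, 0.5])") )  # ['0.5', '0.5', '0.5']
    values = [float(v) for v in strvect2liststr("[1 ; 2 ; 3]")]  # [1.0, 2.0, 3.0]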
+def strmatrix2liststr( __strvect ):
+ """
+ Fallback function, converting the string representation of a matrix
+ into a list of lists of strings, each one representing a float
+ """
+ for s in ("array", "matrix", "list", "tuple", "[", "(", "'", '"'):
+ __strvect = __strvect.replace(s,"") # Nothing
+ __strvect = __strvect.replace(","," ") # Blank
+ for s in ("]", ")"):
+ __strvect = __strvect.replace(s,";") # "]" and ")" become ";"
+ __strvect = re.sub(r';\s*;',';',__strvect)
+ __strvect = __strvect.rstrip(";") # After the lines above, before the one below
+ __strmat = [l.split() for l in __strvect.split(";")]
+ return __strmat
+
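And similarly for the matrix variant, where "]" and ")" act as row
separators::

    print( strmatrix2liststr("[[1, 2], [3, 4]]") )  # [['1', '2'], ['3', '4']]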
def checkFileNameConformity( __filename, __warnInsteadOfPrint=True ):
if sys.platform.startswith("win") and len(__filename) > 256:
__conform = False