From: Jean-Philippe ARGAUD
Date: Sun, 24 Mar 2019 18:10:15 +0000 (+0100)
Subject: Improvement of documentation and variables output for filters
X-Git-Tag: V9_3_0rc2~13
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=a90b2576f910adc81472faaeeb0a9691bf7f1507;p=modules%2Fadao.git

Improvement of documentation and variables output for filters
---

diff --git a/doc/en/ref_algorithm_EnsembleKalmanFilter.rst b/doc/en/ref_algorithm_EnsembleKalmanFilter.rst
index 0497cfa..02e49c2 100644
--- a/doc/en/ref_algorithm_EnsembleKalmanFilter.rst
+++ b/doc/en/ref_algorithm_EnsembleKalmanFilter.rst
@@ -96,8 +96,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -143,10 +145,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
index 2a082a7..979254d 100644
--- a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
+++ b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst
@@ -90,8 +90,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -137,10 +139,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/en/ref_algorithm_KalmanFilter.rst b/doc/en/ref_algorithm_KalmanFilter.rst
index 7c52db1..811ddff 100644
--- a/doc/en/ref_algorithm_KalmanFilter.rst
+++ b/doc/en/ref_algorithm_KalmanFilter.rst
@@ -89,8 +89,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -136,10 +138,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/en/snippets/InnovationAtCurrentAnalysis.rst b/doc/en/snippets/InnovationAtCurrentAnalysis.rst
new file mode 100644
index 0000000..60249a5
--- /dev/null
+++ b/doc/en/snippets/InnovationAtCurrentAnalysis.rst
@@ -0,0 +1,9 @@
+.. index:: single: InnovationAtCurrentAnalysis
+
+InnovationAtCurrentAnalysis
+  *List of vectors*. Each element is an innovation vector at current analysis.
+  This quantity is identical to the innovation vector at current state in the
+  case of a single-state assimilation.
+
+  Example:
+  ``ds = ADD.get("InnovationAtCurrentAnalysis")[-1]``
diff --git a/doc/en/snippets/SimulatedObservationAtCurrentAnalysis.rst b/doc/en/snippets/SimulatedObservationAtCurrentAnalysis.rst
new file mode 100644
index 0000000..3687599
--- /dev/null
+++ b/doc/en/snippets/SimulatedObservationAtCurrentAnalysis.rst
@@ -0,0 +1,10 @@
+.. index:: single: SimulatedObservationAtCurrentAnalysis
+
+SimulatedObservationAtCurrentAnalysis
+  *List of vectors*. Each element is an observed vector simulated by the
+  observation operator from the current analysis, that is, in the observation
+  space. This quantity is identical to the observed vector simulated at
+  current state in the case of a single-state assimilation.
+
+  Example:
+  ``hxs = ADD.get("SimulatedObservationAtCurrentAnalysis")[-1]``
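Both new output variables are read back after execution like any other stored series, through the calculation case object (named ``ADD`` in the examples above) once the names are listed in "StoreSupplementaryCalculations". A minimal post-run sketch, assuming such an already-executed case:

    # Hypothetical post-run extraction; "ADD" is the executed ADAO
    # calculation case used in the documentation examples above.
    series_d  = ADD.get("InnovationAtCurrentAnalysis")            # one vector per analysis step
    series_hx = ADD.get("SimulatedObservationAtCurrentAnalysis")  # one H(Xa) per analysis step
    ds  = series_d[-1]    # innovation at the last analysis
    hxs = series_hx[-1]   # simulated observation at the last analysis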
diff --git a/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst b/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst
index 6b55b69..1049d1f 100644
--- a/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst
+++ b/doc/fr/ref_algorithm_EnsembleKalmanFilter.rst
@@ -97,8 +97,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -144,10 +146,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
index 5a3a3ba..6e80863 100644
--- a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
+++ b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst
@@ -91,8 +91,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -138,10 +140,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/fr/ref_algorithm_KalmanFilter.rst b/doc/fr/ref_algorithm_KalmanFilter.rst
index 1f92ad3..ea85997 100644
--- a/doc/fr/ref_algorithm_KalmanFilter.rst
+++ b/doc/fr/ref_algorithm_KalmanFilter.rst
@@ -89,8 +89,10 @@ StoreSupplementaryCalculations
   "CurrentOptimum",
   "CurrentState",
   "IndexOfOptimum",
+  "InnovationAtCurrentAnalysis",
   "InnovationAtCurrentState",
   "PredictedState",
+  "SimulatedObservationAtCurrentAnalysis",
   "SimulatedObservationAtCurrentOptimum",
   "SimulatedObservationAtCurrentState",
   ].
@@ -136,10 +138,14 @@ StoreSupplementaryCalculations

 .. include:: snippets/IndexOfOptimum.rst

+.. include:: snippets/InnovationAtCurrentAnalysis.rst
+
 .. include:: snippets/InnovationAtCurrentState.rst

 .. include:: snippets/PredictedState.rst

+.. include:: snippets/SimulatedObservationAtCurrentAnalysis.rst
+
 .. include:: snippets/SimulatedObservationAtCurrentOptimum.rst

 .. include:: snippets/SimulatedObservationAtCurrentState.rst
diff --git a/doc/fr/snippets/InnovationAtCurrentAnalysis.rst b/doc/fr/snippets/InnovationAtCurrentAnalysis.rst
new file mode 100644
index 0000000..95ed07d
--- /dev/null
+++ b/doc/fr/snippets/InnovationAtCurrentAnalysis.rst
@@ -0,0 +1,9 @@
+.. index:: single: InnovationAtCurrentAnalysis
+
+InnovationAtCurrentAnalysis
+  *Liste de vecteurs*. Chaque élément est un vecteur d'innovation à l'état
+  analysé courant. Cette quantité est identique au vecteur d'innovation à
+  l'état courant dans le cas d'une assimilation mono-état.
+
+  Exemple :
+  ``ds = ADD.get("InnovationAtCurrentAnalysis")[-1]``
diff --git a/doc/fr/snippets/SimulatedObservationAtCurrentAnalysis.rst b/doc/fr/snippets/SimulatedObservationAtCurrentAnalysis.rst
new file mode 100644
index 0000000..5bead53
--- /dev/null
+++ b/doc/fr/snippets/SimulatedObservationAtCurrentAnalysis.rst
@@ -0,0 +1,11 @@
+.. index:: single: SimulatedObservationAtCurrentAnalysis
+
+SimulatedObservationAtCurrentAnalysis
+  *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé par
+  l'opérateur d'observation à partir de l'état analysé courant, c'est-à-dire
+  dans l'espace des observations. Cette quantité est identique au vecteur
+  d'observation simulé à l'état courant dans le cas d'une assimilation
+  mono-état.
+
+  Exemple :
+  ``hxs = ADD.get("SimulatedObservationAtCurrentAnalysis")[-1]``
diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
index 4a84493..19e808e 100644
--- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py
@@ -75,8 +75,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CurrentOptimum",
                 "CurrentState",
                 "IndexOfOptimum",
+                "InnovationAtCurrentAnalysis",
                 "InnovationAtCurrentState",
                 "PredictedState",
+                "SimulatedObservationAtCurrentAnalysis",
                 "SimulatedObservationAtCurrentOptimum",
                 "SimulatedObservationAtCurrentState",
                 ]
@@ -119,6 +121,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             or self._toStore("CostFunctionJ") \
             or self._toStore("CostFunctionJb") \
             or self._toStore("CostFunctionJo") \
+            or self._toStore("CurrentOptimum") \
             or self._toStore("APosterioriCovariance"):
             BI = B.getI()
             RI = R.getI()
@@ -141,7 +144,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if self._toStore("APosterioriCovariance"):
             self.StoredVariables["APosterioriCovariance"].store( Pn )
             covarianceXa = Pn
-        Xa               = Xb
+        Xa               = XaMin = Xb
         previousJMinimum = numpy.finfo(float).max
         #
         # Predimensionnement
@@ -197,7 +200,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             #
             for i in range(__m):
                 Xn[:,i] = Xn_predicted[:,i] + K * (Yo[:,i] - HX_predicted[:,i])
-            del Yo, PfHT, HPfHT
+            del PfHT, HPfHT
             #
             Xa = Xn.mean(axis=1, dtype=mfp)
             #
@@ -206,24 +209,32 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
            if self._parameters["StoreInternalVariables"] \
                or self._toStore("CostFunctionJ") \
                or self._toStore("CostFunctionJb") \
                or self._toStore("CostFunctionJo") \
                or self._toStore("APosterioriCovariance") \
-               or self._toStore("InnovationAtCurrentState") \
-               or self._toStore("SimulatedObservationAtCurrentState") \
+               or self._toStore("InnovationAtCurrentAnalysis") \
+               or self._toStore("SimulatedObservationAtCurrentAnalysis") \
                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-               _HX = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
-               _Innovation = Ynpu - _HX
+               _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+               _Innovation = Ynpu - _HXa
            #
+           # ---> avec analysis
            self.StoredVariables["Analysis"].store( Xa )
+           if self._toStore("SimulatedObservationAtCurrentAnalysis"):
+               self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+           if self._toStore("InnovationAtCurrentAnalysis"):
+               self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+           # ---> avec current state
            if self._parameters["StoreInternalVariables"] \
-               or self._toStore("CurrentState") \
-               or self._toStore("CurrentOptimum"):
-               self.StoredVariables["CurrentState"].store( Xa )
+               or self._toStore("CurrentState"):
+               self.StoredVariables["CurrentState"].store( Xn )
+           if self._toStore("PredictedState"):
+               self.StoredVariables["PredictedState"].store( Xn_predicted )
            if self._toStore("BMA"):
                self.StoredVariables["BMA"].store( Xn_predicted - Xa )
            if self._toStore("InnovationAtCurrentState"):
-               self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+               self.StoredVariables["InnovationAtCurrentState"].store( - HX_predicted + Ynpu )
            if self._toStore("SimulatedObservationAtCurrentState") \
                or self._toStore("SimulatedObservationAtCurrentOptimum"):
-               self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+               self.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+           # ---> autres
            if self._parameters["StoreInternalVariables"] \
                or self._toStore("CostFunctionJ") \
                or self._toStore("CostFunctionJb") \
                or self._toStore("CostFunctionJo") \
@@ -247,9 +258,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                if self._toStore("IndexOfOptimum"):
                    self.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if self._toStore("CurrentOptimum"):
-                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["CurrentState"][IndexMin] )
+                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] )
                if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if self._toStore("CostFunctionJbAtCurrentOptimum"):
                    self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
                if self._toStore("CostFunctionJoAtCurrentOptimum"):
@@ -265,20 +276,21 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                    Pf = (1./(__m-1)) * Pf
                    Pn = (1. - K * Ht) * Pf
                    self.StoredVariables["APosterioriCovariance"].store( Pn )
-               if self._parameters["EstimationOf"] == "Parameters" \
-                   and J < previousJMinimum:
-                   previousJMinimum = J
-                   # Inutile ici : Xa = Xa
-                   covarianceXa = Pn
+           if self._parameters["EstimationOf"] == "Parameters" \
+               and J < previousJMinimum:
+               previousJMinimum = J
+               XaMin            = Xa
+               if self._toStore("APosterioriCovariance"):
+                   covarianceXaMin = Pn
        #
        # Stockage final supplémentaire de l'optimum en estimation de paramètres
        # ----------------------------------------------------------------------
        if self._parameters["EstimationOf"] == "Parameters":
-           self.StoredVariables["Analysis"].store( XaMin )
+           self.StoredVariables["Analysis"].store( XaMin )
            if self._toStore("APosterioriCovariance"):
-               self.StoredVariables["APosterioriCovariance"].store( covarianceXa )
+               self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
        #
        self._post_run(HO)
        return 0
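In the ensemble filter above, the patch separates the two flavours of innovation: "InnovationAtCurrentAnalysis" is built from the ensemble mean Xa through the observation operator, while "InnovationAtCurrentState" now uses the per-member predicted observations HX_predicted. A toy NumPy sketch of that distinction, with an assumed linear operator H and random data, independent of the ADAO classes:

    import numpy
    numpy.random.seed(0)
    n, m = 4, 10                                  # state size, ensemble size
    H = numpy.random.rand(2, n)                   # toy linear observation operator
    Ynpu = numpy.random.rand(2, 1)                # observation vector
    Xn_predicted = numpy.random.rand(n, m)        # forecast ensemble, one member per column
    Xn = numpy.random.rand(n, m)                  # updated (analysis) ensemble, same shape
    Xa = Xn.mean(axis=1).reshape(-1, 1)           # analysis = mean of the updated ensemble
    HXa = H @ Xa                                  # SimulatedObservationAtCurrentAnalysis
    innovation_at_analysis = Ynpu - HXa           # InnovationAtCurrentAnalysis
    HX_predicted = H @ Xn_predicted               # SimulatedObservationAtCurrentState (per member)
    innovation_at_state = Ynpu - HX_predicted     # InnovationAtCurrentState, one column per member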
diff --git a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
index 5670a6b..ecd5dac 100644
--- a/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/ExtendedKalmanFilter.py
@@ -69,8 +69,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CurrentOptimum",
                 "CurrentState",
                 "IndexOfOptimum",
+                "InnovationAtCurrentAnalysis",
                 "InnovationAtCurrentState",
                 "PredictedState",
+                "SimulatedObservationAtCurrentAnalysis",
                 "SimulatedObservationAtCurrentOptimum",
                 "SimulatedObservationAtCurrentState",
                 ]
@@ -129,7 +131,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if self._toStore("APosterioriCovariance"):
             self.StoredVariables["APosterioriCovariance"].store( Pn.asfullmatrix(Xn.size) )
             covarianceXa = Pn
-        Xa               = Xn
+        Xa               = XaMin = Xn
         previousJMinimum = numpy.finfo(float).max
         #
         for step in range(duration-1):
@@ -188,38 +190,40 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                Xn = Xn_predicted + Pn_predicted * Ha * _u
                Kn = Pn_predicted * Ha * (R + numpy.dot(Ht, Pn_predicted * Ha)).I
                Pn = Pn_predicted - Kn * Ht * Pn_predicted
+           Xa, _HXa = Xn, _HX # Pointeurs
            #
-           self.StoredVariables["Analysis"].store( Xn )
+           # ---> avec analysis
+           self.StoredVariables["Analysis"].store( Xa )
+           if self._toStore("SimulatedObservationAtCurrentAnalysis"):
+               self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+           if self._toStore("InnovationAtCurrentAnalysis"):
+               self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+           # ---> avec current state
            if self._parameters["StoreInternalVariables"] \
-               or self._toStore("CurrentState") \
-               or self._toStore("CurrentOptimum"):
+               or self._toStore("CurrentState"):
                self.StoredVariables["CurrentState"].store( Xn )
            if self._toStore("PredictedState"):
                self.StoredVariables["PredictedState"].store( Xn_predicted )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( Xn_predicted - Xn )
+               self.StoredVariables["BMA"].store( Xn_predicted - Xa )
            if self._toStore("InnovationAtCurrentState"):
                self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
            if self._toStore("SimulatedObservationAtCurrentState") \
                or self._toStore("SimulatedObservationAtCurrentOptimum"):
                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+           # ---> autres
            if self._parameters["StoreInternalVariables"] \
                or self._toStore("CostFunctionJ") \
                or self._toStore("CostFunctionJb") \
                or self._toStore("CostFunctionJo") \
                or self._toStore("CurrentOptimum") \
                or self._toStore("APosterioriCovariance"):
-               Jb  = float( 0.5 * (Xn - Xb).T * BI * (Xn - Xb) )
+               Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
                Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
                J   = Jb + Jo
                self.StoredVariables["CostFunctionJb"].store( Jb )
                self.StoredVariables["CostFunctionJo"].store( Jo )
                self.StoredVariables["CostFunctionJ" ].store( J )
-           if self._parameters["EstimationOf"] == "Parameters" \
-               and J < previousJMinimum:
-               previousJMinimum = J
-               Xa = Xn
-               if self._toStore("APosterioriCovariance"): covarianceXa = Pn
            #
            if self._toStore("IndexOfOptimum") \
                or self._toStore("CurrentOptimum") \
@@ -231,9 +235,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                if self._toStore("IndexOfOptimum"):
                    self.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if self._toStore("CurrentOptimum"):
-                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["CurrentState"][IndexMin] )
+                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] )
                if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if self._toStore("CostFunctionJbAtCurrentOptimum"):
                    self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
                if self._toStore("CostFunctionJoAtCurrentOptimum"):
@@ -242,15 +246,21 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                    self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
            if self._toStore("APosterioriCovariance"):
                self.StoredVariables["APosterioriCovariance"].store( Pn )
+           if self._parameters["EstimationOf"] == "Parameters" \
+               and J < previousJMinimum:
+               previousJMinimum = J
+               XaMin            = Xa
+               if self._toStore("APosterioriCovariance"):
+                   covarianceXaMin = Pn
        #
        # Stockage final supplémentaire de l'optimum en estimation de paramètres
        # ----------------------------------------------------------------------
        if self._parameters["EstimationOf"] == "Parameters":
-           self.StoredVariables["Analysis"].store( Xa.A1 )
+           self.StoredVariables["Analysis"].store( XaMin )
            if self._toStore("APosterioriCovariance"):
-               self.StoredVariables["APosterioriCovariance"].store( covarianceXa )
+               self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
        #
        self._post_run(HO)
        return 0
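The block moved to the end of the loop above implements a running minimum of the cost J in parameter-estimation mode: the best analysis seen so far is kept in XaMin and stored as the final "Analysis" value after the time loop. The pattern, reduced to a self-contained Python sketch with made-up (analysis, covariance, cost) triples:

    import numpy
    candidate_steps = [                       # hypothetical per-step results
        (numpy.array([1.0]), numpy.eye(1), 3.0),
        (numpy.array([0.8]), numpy.eye(1), 1.5),
        (numpy.array([0.9]), numpy.eye(1), 2.2),
    ]
    previousJMinimum = numpy.finfo(float).max
    XaMin = covarianceXaMin = None
    for Xa, Pn, J in candidate_steps:
        if J < previousJMinimum:              # keep the best analysis seen so far
            previousJMinimum, XaMin, covarianceXaMin = J, Xa, Pn
    # XaMin (here [0.8]) is what the final "Analysis".store( XaMin ) records.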
diff --git a/src/daComposant/daAlgorithms/KalmanFilter.py b/src/daComposant/daAlgorithms/KalmanFilter.py
index e8115b4..bde6935 100644
--- a/src/daComposant/daAlgorithms/KalmanFilter.py
+++ b/src/daComposant/daAlgorithms/KalmanFilter.py
@@ -62,8 +62,10 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 "CurrentOptimum",
                 "CurrentState",
                 "IndexOfOptimum",
+                "InnovationAtCurrentAnalysis",
                 "InnovationAtCurrentState",
                 "PredictedState",
+                "SimulatedObservationAtCurrentAnalysis",
                 "SimulatedObservationAtCurrentOptimum",
                 "SimulatedObservationAtCurrentState",
                 ]
@@ -164,38 +166,40 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                Xn = Xn_predicted + Pn_predicted * Ha * _u
                Kn = Pn_predicted * Ha * (R + numpy.dot(Ht, Pn_predicted * Ha)).I
                Pn = Pn_predicted - Kn * Ht * Pn_predicted
+           Xa, _HXa = Xn, _HX # Pointeurs
            #
-           self.StoredVariables["Analysis"].store( Xn )
+           # ---> avec analysis
+           self.StoredVariables["Analysis"].store( Xa )
+           if self._toStore("SimulatedObservationAtCurrentAnalysis"):
+               self.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+           if self._toStore("InnovationAtCurrentAnalysis"):
+               self.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+           # ---> avec current state
            if self._parameters["StoreInternalVariables"] \
-               or self._toStore("CurrentState") \
-               or self._toStore("CurrentOptimum"):
+               or self._toStore("CurrentState"):
                self.StoredVariables["CurrentState"].store( Xn )
            if self._toStore("PredictedState"):
                self.StoredVariables["PredictedState"].store( Xn_predicted )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( Xn_predicted - Xn )
+               self.StoredVariables["BMA"].store( Xn_predicted - Xa )
            if self._toStore("InnovationAtCurrentState"):
                self.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
            if self._toStore("SimulatedObservationAtCurrentState") \
                or self._toStore("SimulatedObservationAtCurrentOptimum"):
                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+           # ---> autres
            if self._parameters["StoreInternalVariables"] \
                or self._toStore("CostFunctionJ") \
                or self._toStore("CostFunctionJb") \
                or self._toStore("CostFunctionJo") \
                or self._toStore("CurrentOptimum") \
                or self._toStore("APosterioriCovariance"):
-               Jb  = float( 0.5 * (Xn - Xb).T * BI * (Xn - Xb) )
+               Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
                Jo  = float( 0.5 * _Innovation.T * RI * _Innovation )
                J   = Jb + Jo
                self.StoredVariables["CostFunctionJb"].store( Jb )
                self.StoredVariables["CostFunctionJo"].store( Jo )
                self.StoredVariables["CostFunctionJ" ].store( J )
-           if self._parameters["EstimationOf"] == "Parameters" \
-               and J < previousJMinimum:
-               previousJMinimum = J
-               Xa = Xn
-               if self._toStore("APosterioriCovariance"): covarianceXa = Pn
            #
            if self._toStore("IndexOfOptimum") \
                or self._toStore("CurrentOptimum") \
@@ -207,9 +211,9 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                if self._toStore("IndexOfOptimum"):
                    self.StoredVariables["IndexOfOptimum"].store( IndexMin )
                if self._toStore("CurrentOptimum"):
-                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["CurrentState"][IndexMin] )
+                   self.StoredVariables["CurrentOptimum"].store( self.StoredVariables["Analysis"][IndexMin] )
                if self._toStore("SimulatedObservationAtCurrentOptimum"):
-                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+                   self.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( self.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
                if self._toStore("CostFunctionJbAtCurrentOptimum"):
                    self.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( self.StoredVariables["CostFunctionJb"][IndexMin] )
                if self._toStore("CostFunctionJoAtCurrentOptimum"):
@@ -218,15 +222,21 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                    self.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( self.StoredVariables["CostFunctionJ" ][IndexMin] )
            if self._toStore("APosterioriCovariance"):
                self.StoredVariables["APosterioriCovariance"].store( Pn )
+           if self._parameters["EstimationOf"] == "Parameters" \
+               and J < previousJMinimum:
+               previousJMinimum = J
+               XaMin            = Xa
+               if self._toStore("APosterioriCovariance"):
+                   covarianceXaMin = Pn
        #
        # Stockage final supplémentaire de l'optimum en estimation de paramètres
        # ----------------------------------------------------------------------
        if self._parameters["EstimationOf"] == "Parameters":
-           self.StoredVariables["Analysis"].store( Xa.A1 )
+           self.StoredVariables["Analysis"].store( XaMin )
            if self._toStore("APosterioriCovariance"):
-               self.StoredVariables["APosterioriCovariance"].store( covarianceXa )
+               self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
        #
        self._post_run(HO)
        return 0
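For reference, the quantities the linear filter stores at each step derive from the standard analysis update visible in the hunks above. A toy one-step sketch in plain NumPy, with illustrative matrices and outside the ADAO classes, mirroring the names used in the code:

    import numpy
    numpy.random.seed(0)
    n, p = 3, 2                                   # state and observation sizes
    Xb = numpy.zeros((n, 1))                      # background state
    B, R = numpy.eye(n), numpy.eye(p)             # background/observation covariances
    H = numpy.random.rand(p, n)                   # toy linear observation operator
    Ynpu = numpy.random.rand(p, 1)                # observation at current step
    Xn_predicted, Pn_predicted = Xb, B            # no evolution in this toy step
    _HX = H @ Xn_predicted
    _Innovation = Ynpu - _HX
    Kn = Pn_predicted @ H.T @ numpy.linalg.inv(R + H @ Pn_predicted @ H.T)
    Xn = Xn_predicted + Kn @ _Innovation          # analysis state
    Pn = Pn_predicted - Kn @ H @ Pn_predicted     # analysis covariance
    Xa, _HXa = Xn, _HX                            # pointers, as in the patch
    Jb = float( 0.5 * (Xa - Xb).T @ numpy.linalg.inv(B) @ (Xa - Xb) )
    Jo = float( 0.5 * _Innovation.T @ numpy.linalg.inv(R) @ _Innovation )
    J  = Jb + Jo                                  # CostFunctionJ stored per step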
diff --git a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
index bb3da55..f4fc2fb 100644
--- a/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
+++ b/src/daComposant/daAlgorithms/UnscentedKalmanFilter.py
@@ -172,7 +172,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if self._toStore("APosterioriCovariance"):
             self.StoredVariables["APosterioriCovariance"].store( Pn )
             covarianceXa = Pn
-        Xa               = Xn
+        Xa               = XaMin = Xb
         previousJMinimum = numpy.finfo(float).max
         #
         for step in range(duration-1):
@@ -268,12 +268,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
            if self._parameters["Bounds"] is not None and self._parameters["ConstrainedBy"] == "EstimateProjection":
                Xn = numpy.max(numpy.hstack((Xn,numpy.asmatrix(self._parameters["Bounds"])[:,0])),axis=1)
                Xn = numpy.min(numpy.hstack((Xn,numpy.asmatrix(self._parameters["Bounds"])[:,1])),axis=1)
+           Xa = Xn # Pointeurs
            #
-           self.StoredVariables["Analysis"].store( Xn.A1 )
+           # ---> avec analysis
+           self.StoredVariables["Analysis"].store( Xa )
            if self._toStore("APosterioriCovariance"):
                self.StoredVariables["APosterioriCovariance"].store( Pn )
+           # ---> avec current state
            if self._toStore("InnovationAtCurrentState"):
-               self.StoredVariables["InnovationAtCurrentState"].store( numpy.ravel( d.A1 ) )
+               self.StoredVariables["InnovationAtCurrentState"].store( d )
            if self._parameters["StoreInternalVariables"] \
                or self._toStore("CurrentState"):
                self.StoredVariables["CurrentState"].store( Xn )
@@ -281,28 +284,27 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                or self._toStore("CostFunctionJ") \
                or self._toStore("CostFunctionJb") \
                or self._toStore("CostFunctionJo"):
-               Jb  = 0.5 * (Xn - Xb).T * BI * (Xn - Xb)
-               Jo  = 0.5 * d.T * RI * d
-               J   = float( Jb ) + float( Jo )
+               Jb  = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+               Jo  = float( 0.5 * d.T * RI * d )
+               J   = Jb + Jo
                self.StoredVariables["CostFunctionJb"].store( Jb )
                self.StoredVariables["CostFunctionJo"].store( Jo )
                self.StoredVariables["CostFunctionJ" ].store( J )
-               if J < previousJMinimum:
-                   previousJMinimum = J
-                   Xa = Xn
-                   if self._toStore("APosterioriCovariance"):
-                       covarianceXa = Pn
-           else:
-               Xa = Xn
+           if self._parameters["EstimationOf"] == "Parameters" \
+               and J < previousJMinimum:
+               previousJMinimum = J
+               XaMin            = Xa
+               if self._toStore("APosterioriCovariance"):
+                   covarianceXaMin = Pn
        #
-       # Stockage supplementaire de l'optimum en estimation de parametres
-       # ----------------------------------------------------------------
+       # Stockage final supplémentaire de l'optimum en estimation de paramètres
+       # ----------------------------------------------------------------------
        if self._parameters["EstimationOf"] == "Parameters":
-           self.StoredVariables["Analysis"].store( Xa.A1 )
+           self.StoredVariables["Analysis"].store( XaMin )
            if self._toStore("APosterioriCovariance"):
-               self.StoredVariables["APosterioriCovariance"].store( covarianceXa )
+               self.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
            if self._toStore("BMA"):
-               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
+               self.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
        #
        self._post_run(HO)
        return 0
diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py
index 0099c21..3237c13 100644
--- a/src/daComposant/daCore/BasicObjects.py
+++ b/src/daComposant/daCore/BasicObjects.py
@@ -628,6 +628,7 @@ class Algorithm(object):
         self.StoredVariables["GradientOfCostFunctionJo"]             = Persistence.OneVector(name = "GradientOfCostFunctionJo")
         self.StoredVariables["IndexOfOptimum"]                       = Persistence.OneIndex(name = "IndexOfOptimum")
         self.StoredVariables["Innovation"]                           = Persistence.OneVector(name = "Innovation")
+        self.StoredVariables["InnovationAtCurrentAnalysis"]          = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
         self.StoredVariables["InnovationAtCurrentState"]             = Persistence.OneVector(name = "InnovationAtCurrentState")
         self.StoredVariables["JacobianMatrixAtBackground"]           = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
         self.StoredVariables["JacobianMatrixAtCurrentState"]         = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
@@ -641,6 +642,7 @@ class Algorithm(object):
         self.StoredVariables["SigmaBck2"]                            = Persistence.OneScalar(name = "SigmaBck2")
         self.StoredVariables["SigmaObs2"]                            = Persistence.OneScalar(name = "SigmaObs2")
         self.StoredVariables["SimulatedObservationAtBackground"]     = Persistence.OneVector(name = "SimulatedObservationAtBackground")
+        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
         self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
         self.StoredVariables["SimulatedObservationAtCurrentState"]   = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
         self.StoredVariables["SimulatedObservationAtOptimum"]        = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
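The two registrations above follow the general pattern of BasicObjects.Algorithm: every output variable is declared once as a named, append-only series, filled by .store() at each assimilation step and read back by index. A minimal stand-in sketch of that pattern (an illustrative class, not the real daCore.Persistence API):

    # Illustrative stand-in class; the real Persistence.OneVector offers more.
    class OneVectorSeries(object):
        def __init__(self, name):
            self.name    = name
            self._values = []
        def store(self, value):          # called once per assimilation step
            self._values.append(value)
        def __getitem__(self, index):    # enables the ADD.get("...")[-1] idiom
            return self._values[index]

    StoredVariables = {}
    for key in ("InnovationAtCurrentAnalysis", "SimulatedObservationAtCurrentAnalysis"):
        StoredVariables[key] = OneVectorSeries(name = key)
    StoredVariables["InnovationAtCurrentAnalysis"].store([0.1, -0.2])
    print(StoredVariables["InnovationAtCurrentAnalysis"][-1])   # -> [0.1, -0.2]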