From: Jean-Philippe ARGAUD
Date: Tue, 20 Jan 2015 21:03:21 +0000 (+0100)
Subject: Minor documentation and source correction for outputs
X-Git-Tag: V7_5_1rc2~1
X-Git-Url: http://git.salome-platform.org/gitweb/?a=commitdiff_plain;h=fec1983f6538d1e421147346eefe5283fbb9aa33;p=modules%2Fadao.git

Minor documentation and source correction for outputs
---

diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst
index b7d4170..e3221ad 100644
--- a/doc/en/ref_algorithm_3DVAR.rst
+++ b/doc/en/ref_algorithm_3DVAR.rst
@@ -306,6 +306,12 @@ The conditional outputs of the algorithm are the following:
 
     Example : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *List of vectors*. Each element is an observed vector at the current state,
+    that is, in the observation space.
+
+    Example : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *List of vectors*. Each element is a vector of observation simulated from
     the analysis or optimal state :math:`\mathbf{x}^a`.
diff --git a/doc/en/ref_algorithm_NonLinearLeastSquares.rst b/doc/en/ref_algorithm_NonLinearLeastSquares.rst
index 06d062f..9dfed31 100644
--- a/doc/en/ref_algorithm_NonLinearLeastSquares.rst
+++ b/doc/en/ref_algorithm_NonLinearLeastSquares.rst
@@ -242,6 +242,12 @@ The conditional outputs of the algorithm are the following:
 
     Example : ``omb = ADD.get("OMB")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *List of vectors*. Each element is an observed vector at the current state,
+    that is, in the observation space.
+
+    Example : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *List of vectors*. Each element is a vector of observation simulated from
     the analysis or optimal state :math:`\mathbf{x}^a`.
diff --git a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
index c951355..136f328 100644
--- a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
+++ b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst
@@ -246,6 +246,12 @@ The conditional outputs of the algorithm are the following:
 
     Example : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *List of vectors*. Each element is an observed vector at the current state,
+    that is, in the observation space.
+
+    Example : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *List of vectors*. Each element is a vector of observation simulated from
     the analysis or optimal state :math:`\mathbf{x}^a`.
diff --git a/doc/en/ref_algorithm_QuantileRegression.rst b/doc/en/ref_algorithm_QuantileRegression.rst
index c1f83a0..406fa5c 100644
--- a/doc/en/ref_algorithm_QuantileRegression.rst
+++ b/doc/en/ref_algorithm_QuantileRegression.rst
@@ -201,6 +201,12 @@ The conditional outputs of the algorithm are the following:
 
     Example : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *List of vectors*. Each element is an observed vector at the current state,
+    that is, in the observation space.
+
+    Example : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *List of vectors*. Each element is a vector of observation simulated from
     the analysis or optimal state :math:`\mathbf{x}^a`.
diff --git a/doc/en/ref_algorithm_SamplingTest.rst b/doc/en/ref_algorithm_SamplingTest.rst
index 48f51f3..a2aa2d1 100644
--- a/doc/en/ref_algorithm_SamplingTest.rst
+++ b/doc/en/ref_algorithm_SamplingTest.rst
@@ -173,9 +173,9 @@ The options of the algorithm are the following:
     calculations or memory consumptions. The default is a void list, none of
     these variables being calculated and stored by default. The possible names
     are in the following list: ["CostFunctionJ", "CurrentState", "Innovation",
-    "ObservedState"].
+    "SimulatedObservationAtCurrentState"].
 
-    Example : ``{"StoreSupplementaryCalculations":["CostFunctionJ", "ObservedState"]}``
+    Example : ``{"StoreSupplementaryCalculations":["CostFunctionJ", "SimulatedObservationAtCurrentState"]}``
 
 See also
 ++++++++
diff --git a/doc/en/ref_output_variables.rst b/doc/en/ref_output_variables.rst
index edc1aa4..8399065 100644
--- a/doc/en/ref_output_variables.rst
+++ b/doc/en/ref_output_variables.rst
@@ -261,12 +261,6 @@ of availability. They are the following, in alphabetical order:
 
     Example : ``m = ADD.get("MahalanobisConsistency")[-1]``
 
-  ObservedState
-    *List of vectors*. Each element is an observed state vector, that is, in the
-    observation space.
-
-    Example : ``Ys = ADD.get("ObservedState")[-1]``
-
   OMA
     *List of vectors*. Each element is a vector of difference between the
     observation and the optimal state in the observation space.
@@ -297,6 +291,12 @@ of availability. They are the following, in alphabetical order:
 
     Example : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *List of vectors*. Each element is an observed vector at the current state,
+    that is, in the observation space.
+
+    Example : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *List of vectors*. Each element is a vector of observation simulated from
     the analysis or optimal state :math:`\mathbf{x}^a`.
diff --git a/doc/fr/ref_algorithm_3DVAR.rst b/doc/fr/ref_algorithm_3DVAR.rst
index c83abf1..6fe7797 100644
--- a/doc/fr/ref_algorithm_3DVAR.rst
+++ b/doc/fr/ref_algorithm_3DVAR.rst
@@ -315,6 +315,12 @@ Les sorties conditionnelles de l'algorithme sont les suivantes:
 
     Exemple : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *Liste de vecteurs*. Chaque élément est un vecteur observé à l'état courant,
+    c'est-à-dire dans l'espace des observations.
+
+    Exemple : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé à
     partir de l'analyse ou de l'état optimal :math:`\mathbf{x}^a`.
diff --git a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst
index 542ad2b..90f5d02 100644
--- a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst
+++ b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst
@@ -253,6 +253,12 @@ Les sorties conditionnelles de l'algorithme sont les suivantes:
 
     Exemple : ``omb = ADD.get("OMB")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *Liste de vecteurs*. Chaque élément est un vecteur observé à l'état courant,
+    c'est-à-dire dans l'espace des observations.
+
+    Exemple : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé à
     partir de l'analyse ou de l'état optimal :math:`\mathbf{x}^a`.
diff --git a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst
index ffc989f..110f951 100644
--- a/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst
+++ b/doc/fr/ref_algorithm_ParticleSwarmOptimization.rst
@@ -253,6 +253,12 @@ Les sorties conditionnelles de l'algorithme sont les suivantes:
 
     Exemple : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *Liste de vecteurs*. Chaque élément est un vecteur observé à l'état courant,
+    c'est-à-dire dans l'espace des observations.
+
+    Exemple : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé à
     partir de l'analyse ou de l'état optimal :math:`\mathbf{x}^a`.
diff --git a/doc/fr/ref_algorithm_QuantileRegression.rst b/doc/fr/ref_algorithm_QuantileRegression.rst
index 9a1b4d0..fe28ed6 100644
--- a/doc/fr/ref_algorithm_QuantileRegression.rst
+++ b/doc/fr/ref_algorithm_QuantileRegression.rst
@@ -206,6 +206,12 @@ Les sorties conditionnelles de l'algorithme sont les suivantes:
 
     Exemple : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *Liste de vecteurs*. Chaque élément est un vecteur observé à l'état courant,
+    c'est-à-dire dans l'espace des observations.
+
+    Exemple : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé à
     partir de l'analyse ou de l'état optimal :math:`\mathbf{x}^a`.
diff --git a/doc/fr/ref_algorithm_SamplingTest.rst b/doc/fr/ref_algorithm_SamplingTest.rst
index 1046e21..dbdc6df 100644
--- a/doc/fr/ref_algorithm_SamplingTest.rst
+++ b/doc/fr/ref_algorithm_SamplingTest.rst
@@ -180,9 +180,9 @@ Les options de l'algorithme sont les suivantes:
     calculs ou du stockage coûteux. La valeur par défaut est une liste vide,
     aucune de ces variables n'étant calculée et stockée par défaut. Les noms
     possibles sont dans la liste suivante : ["CostFunctionJ", "CurrentState",
-    "Innovation", "ObservedState"].
+    "Innovation", "SimulatedObservationAtCurrentState"].
 
-    Exemple : ``{"StoreSupplementaryCalculations":["CostFunctionJ", "ObservedState"]}``
+    Exemple : ``{"StoreSupplementaryCalculations":["CostFunctionJ", "SimulatedObservationAtCurrentState"]}``
 
 Voir aussi
 ++++++++++
diff --git a/doc/fr/ref_output_variables.rst b/doc/fr/ref_output_variables.rst
index d31383f..5941bf2 100644
--- a/doc/fr/ref_output_variables.rst
+++ b/doc/fr/ref_output_variables.rst
@@ -270,12 +270,6 @@ alphab
 
     Exemple : ``m = ADD.get("MahalanobisConsistency")[-1]``
 
-  ObservedState
-    *Liste de vecteurs*. Chaque élément est un vecteur d'état observé,
-    c'est-à-dire dans l'espace des observations.
-
-    Exemple : ``Ys = ADD.get("ObservedState")[-1]``
-
   OMA
     *Liste de vecteurs*. Chaque élément est un vecteur d'écart entre
     l'observation et l'état optimal dans l'espace des observations.
@@ -306,6 +300,12 @@ alphab
 
     Exemple : ``hxb = ADD.get("SimulatedObservationAtBackground")[-1]``
 
+  SimulatedObservationAtCurrentState
+    *Liste de vecteurs*. Chaque élément est un vecteur observé à l'état courant,
+    c'est-à-dire dans l'espace des observations.
+
+    Exemple : ``Ys = ADD.get("SimulatedObservationAtCurrentState")[-1]``
+
   SimulatedObservationAtOptimum
     *Liste de vecteurs*. Chaque élément est un vecteur d'observation simulé à
     partir de l'analyse ou de l'état optimal :math:`\mathbf{x}^a`.
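
The documentation hunks above all describe the same user-level pattern: the new output has to be requested through the "StoreSupplementaryCalculations" option, and is then read back with ``ADD.get``. The following fragment is an illustrative sketch and not part of the patch; it only rearranges the documented examples, and it assumes, as the documentation does, that ``ADD`` designates the ADAO data assimilation object available in the user post-processing script.

    # Illustrative sketch (not part of the patch), based on the documented examples.
    # Assumption: "ADD" is the ADAO data assimilation object of the user script.
    #
    # 1. Ask the algorithm to store the supplementary output, through the
    #    "StoreSupplementaryCalculations" entry of the algorithm parameters:
    AlgorithmParameters = {
        "StoreSupplementaryCalculations" : ["CostFunctionJ", "SimulatedObservationAtCurrentState"],
        }
    #
    # 2. After the calculation, read the stored series back from ADD:
    Ys_series = ADD.get("SimulatedObservationAtCurrentState")      # one H(X) vector per evaluation
    Ys_last   = ADD.get("SimulatedObservationAtCurrentState")[-1]  # last simulated observation
    J_series  = ADD.get("CostFunctionJ")                           # cost function values, same ordering
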
diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py
index 3e27be3..2ac2931 100644
--- a/src/daComposant/daAlgorithms/3DVAR.py
+++ b/src/daComposant/daAlgorithms/3DVAR.py
@@ -72,7 +72,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default = [],
             typecast = tuple,
             message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
-            listval = ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation", "SigmaObs2", "MahalanobisConsistency", "SimulationQuantiles", "SimulatedObservationAtBackground", "SimulatedObservationAtOptimum"]
+            listval = ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation", "SigmaObs2", "MahalanobisConsistency", "SimulationQuantiles", "SimulatedObservationAtBackground", "SimulatedObservationAtCurrentState", "SimulatedObservationAtOptimum"]
             )
         self.defineRequiredParameter(
             name = "Quantiles",
@@ -162,6 +162,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             J = float( Jb ) + float( Jo )
             if self._parameters["StoreInternalVariables"]:
                 self.StoredVariables["CurrentState"].store( _X )
+            if "SimulatedObservationAtCurrentState" in self._parameters["StoreSupplementaryCalculations"]:
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
diff --git a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py
index b593773..0d08f26 100644
--- a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py
+++ b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py
@@ -72,7 +72,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default = [],
             typecast = tuple,
             message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
-            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtOptimum"]
+            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtCurrentState", "SimulatedObservationAtOptimum"]
             )
 
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
@@ -136,6 +136,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             J = float( Jb ) + float( Jo )
             if self._parameters["StoreInternalVariables"]:
                 self.StoredVariables["CurrentState"].store( _X )
+            if "SimulatedObservationAtCurrentState" in self._parameters["StoreSupplementaryCalculations"]:
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
diff --git a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py
index ccf148f..6d4cc5a 100644
--- a/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py
+++ b/src/daComposant/daAlgorithms/ParticleSwarmOptimization.py
@@ -84,7 +84,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default = [],
             typecast = tuple,
             message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
-            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtBackground", "SimulatedObservationAtOptimum"]
+            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtBackground", "SimulatedObservationAtCurrentState", "SimulatedObservationAtOptimum"]
             )
 
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
@@ -150,6 +150,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
+            if "SimulatedObservationAtCurrentState" in self._parameters["StoreSupplementaryCalculations"]:
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
             return J
         #
         # Point de démarrage de l'optimisation : Xini = Xb
diff --git a/src/daComposant/daAlgorithms/QuantileRegression.py b/src/daComposant/daAlgorithms/QuantileRegression.py
index ce51631..482216f 100644
--- a/src/daComposant/daAlgorithms/QuantileRegression.py
+++ b/src/daComposant/daAlgorithms/QuantileRegression.py
@@ -67,7 +67,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default = [],
             typecast = tuple,
             message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
-            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtBackground", "SimulatedObservationAtOptimum"]
+            listval = ["BMA", "OMA", "OMB", "Innovation", "SimulatedObservationAtBackground", "SimulatedObservationAtCurrentState", "SimulatedObservationAtOptimum"]
             )
 
     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
@@ -114,6 +114,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             J = Jb + Jo
             if self._parameters["StoreInternalVariables"]:
                 self.StoredVariables["CurrentState"].store( _X )
+            if "SimulatedObservationAtCurrentState" in self._parameters["StoreSupplementaryCalculations"]:
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
diff --git a/src/daComposant/daAlgorithms/SamplingTest.py b/src/daComposant/daAlgorithms/SamplingTest.py
index 6a58f68..7856b23 100644
--- a/src/daComposant/daAlgorithms/SamplingTest.py
+++ b/src/daComposant/daAlgorithms/SamplingTest.py
@@ -74,7 +74,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             default = [],
             typecast = tuple,
             message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
-            listval = ["CostFunctionJ","CurrentState","Innovation","ObservedState"]
+            listval = ["CostFunctionJ","CurrentState","Innovation","SimulatedObservationAtCurrentState"]
             )
         self.defineRequiredParameter(
             name = "SetSeed",
@@ -157,8 +157,8 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
                 self.StoredVariables["CurrentState"].store( _X )
             if "Innovation" in self._parameters["StoreSupplementaryCalculations"]:
                 self.StoredVariables["Innovation"].store( Y - _HX )
-            if "ObservedState" in self._parameters["StoreSupplementaryCalculations"]:
-                self.StoredVariables["ObservedState"].store( _HX )
+            if "SimulatedObservationAtCurrentState" in self._parameters["StoreSupplementaryCalculations"]:
+                self.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py
index f63821d..0e83daa 100644
--- a/src/daComposant/daCore/BasicObjects.py
+++ b/src/daComposant/daCore/BasicObjects.py
@@ -262,8 +262,8 @@ class Algorithm:
             - CurrentState : état courant lors d'itérations
             - Analysis : l'analyse Xa
             - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
+            - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
             - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
-            - ObservedState : l'état observé H(X)
             - Innovation : l'innovation : d = Y - H(X)
             - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
             - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
@@ -283,26 +283,26 @@ class Algorithm:
         self.__required_parameters = {}
         self.StoredVariables = {}
         #
-        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
-        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
-        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
-        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
-        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
-        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
-        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
-        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
-        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
-        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
-        self.StoredVariables["ObservedState"] = Persistence.OneVector(name = "ObservedState")
-        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
-        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
-        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
-        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
-        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
-        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
-        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
-        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
-        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
+        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
+        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
+        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
+        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
+        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
+        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
+        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
+        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
+        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
+        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
+        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
+        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
+        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
+        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
+        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
+        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
+        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
+        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
+        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
+        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
 
     def _pre_run(self):
         logging.debug("%s Lancement"%self._name)
diff --git a/src/daSalome/daYacsSchemaCreator/infos_daComposant.py b/src/daSalome/daYacsSchemaCreator/infos_daComposant.py
index 012e9cd..0aed5da 100644
--- a/src/daSalome/daYacsSchemaCreator/infos_daComposant.py
+++ b/src/daSalome/daYacsSchemaCreator/infos_daComposant.py
@@ -247,7 +247,7 @@ ObserversList = [
     "Analysis",
     "CurrentState",
     "Innovation",
-    "ObservedState",
+    "SimulatedObservationAtCurrentState",
     "OMA",
     "OMB",
     "BMA",