From 23cd5b6179b6b06cd38f6d134deec370b84a0f63 Mon Sep 17 00:00:00 2001 From: Jean-Philippe ARGAUD Date: Mon, 6 Feb 2017 10:13:30 +0100 Subject: [PATCH] Documentation corrections and improvements --- doc/en/advanced.rst | 4 ++-- doc/en/examples.rst | 10 +++++----- doc/en/glossary.rst | 2 +- doc/en/ref_algorithm_3DVAR.rst | 17 +++++++++-------- doc/en/ref_algorithm_4DVAR.rst | 8 ++++---- doc/en/ref_algorithm_AdjointTest.rst | 2 +- doc/en/ref_algorithm_Blue.rst | 6 +++--- ...f_algorithm_DerivativeFreeOptimization.rst | 16 ++++++++-------- doc/en/ref_algorithm_ExtendedKalmanFilter.rst | 2 +- doc/en/ref_algorithm_GradientTest.rst | 12 ++++++------ doc/en/ref_algorithm_KalmanFilter.rst | 2 +- doc/en/ref_algorithm_LinearityTest.rst | 2 +- .../ref_algorithm_NonLinearLeastSquares.rst | 3 ++- ...ef_algorithm_ParticleSwarmOptimization.rst | 4 ++-- doc/en/ref_algorithm_SamplingTest.rst | 11 +++++------ doc/en/ref_algorithm_TangentTest.rst | 2 +- doc/en/ref_assimilation_keywords.rst | 2 +- doc/en/ref_covariance_requirements.rst | 2 +- doc/en/ref_observers_requirements.rst | 14 +++++++------- doc/en/ref_operator_requirements.rst | 19 +++++++++---------- doc/en/ref_options_AlgorithmParameters.rst | 6 +++--- doc/en/ref_output_variables.rst | 12 ++++++------ doc/en/theory.rst | 2 +- doc/en/tui.rst | 10 ++-------- doc/en/using.rst | 2 +- doc/fr/bibliography.rst | 2 +- doc/fr/examples.rst | 9 +++++---- doc/fr/ref_algorithm_3DVAR.rst | 1 + doc/fr/ref_algorithm_ExtendedKalmanFilter.rst | 2 +- doc/fr/ref_algorithm_GradientTest.rst | 6 +++--- doc/fr/ref_algorithm_LinearityTest.rst | 4 ++-- .../ref_algorithm_NonLinearLeastSquares.rst | 3 ++- doc/fr/ref_algorithm_SamplingTest.rst | 5 ++--- .../ref_algorithm_UnscentedKalmanFilter.rst | 2 +- doc/fr/ref_entry_types.rst | 2 +- doc/fr/theory.rst | 2 +- doc/fr/tui.rst | 10 ++-------- doc/fr/using.rst | 6 +++--- src/daComposant/daCore/Templates.py | 12 ++++++------ .../daYacsIntegration/daOptimizerLoop.py | 2 +- 40 files changed, 115 insertions(+), 125 deletions(-) diff --git a/doc/en/advanced.rst b/doc/en/advanced.rst index 80ea41c..fe6b5dc 100644 --- a/doc/en/advanced.rst +++ b/doc/en/advanced.rst @@ -71,7 +71,7 @@ the following Shell script:: It is then required to change it to be in executable mode. -A more complete example consist in launching execution of a YACS sheme given by +A more complete example consist in launching execution of a YACS scheme given by the user, having previously verified its availability. For that, replacing the text ````, one needs only to save the following Shell script:: @@ -389,7 +389,7 @@ the following ones: #. The dimension of the state vector is more than 2 or 3. #. Unitary calculation of user defined direct function "last for long time", that is, more than few minutes. -#. The user defined direct function doesn't already use parallelism (or parallel execution is disabled in the user calculation). +#. The user defined direct function does not already use parallelism (or parallel execution is disabled in the user calculation). #. The user defined direct function avoids read/write access to common resources, mainly stored data, output files or memory capacities. #. The "*observer*" added by the user avoid read/write access to common resources, such as files or memory. diff --git a/doc/en/examples.rst b/doc/en/examples.rst index 3823f20..63336ee 100644 --- a/doc/en/examples.rst +++ b/doc/en/examples.rst @@ -322,7 +322,7 @@ dictionary. 
All the keys inside the dictionary are optional, they all have default values, and can exist without being used. For example:: AlgorithmParameters = { - "Minimizer" : "CG", # Possible choice : "LBFGSB", "TNC", "CG", "BFGS" + "Minimizer" : "LBFGSB", # Recommended "MaximumNumberOfSteps" : 10, } @@ -330,7 +330,7 @@ If no bounds at all are required on the control variables, then one can choose the "*BFGS*" or "*CG*" minimization algorithm for all the variational data assimilation or optimization algorithms. For constrained optimization, the minimizer "*LBFGSB*" is often more robust, but the "*TNC*" is sometimes more -effective. +effective. In a general way, the "*LBFGSB*" algorithm choice is recommended. Then the script can be added to the ADAO case, in a file entry describing the "*AlgorithmParameters*" keyword, as follows: @@ -367,7 +367,7 @@ Experimental setup We continue to operate in a 3-dimensional space, in order to restrict the size of numerical object shown in the scripts, but the problem is -not dependant of the dimension. +not dependent of the dimension. We choose a twin experiment context, using a known true state :math:`\mathbf{x}^t` but of arbitrary value:: @@ -402,7 +402,7 @@ covariances matrix :math:`\mathbf{R}` are generated by using the true state and, with an arbitrary standard deviation of 1% on each error component:: - R = 0.0001 * diagonal( lenght(Yo) ) + R = 0.0001 * diagonal( length(Yo) ) All the information required for estimation by data assimilation are then defined. @@ -545,7 +545,7 @@ the following parameters can be defined in a Python script file named # Creating the required ADAO variable # ----------------------------------- AlgorithmParameters = { - "Minimizer" : "TNC", # Possible : "LBFGSB", "TNC", "CG", "BFGS" + "Minimizer" : "LBFGSB", # Recommended "MaximumNumberOfSteps" : 15, # Number of global iterative steps "Bounds" : [ [ None, None ], # Bound on the first parameter diff --git a/doc/en/glossary.rst b/doc/en/glossary.rst index 94d76fb..9a09aa2 100644 --- a/doc/en/glossary.rst +++ b/doc/en/glossary.rst @@ -97,7 +97,7 @@ Glossary procedure. background - The *a priori* known state, which is not optimal, and is used as a rought + The *a priori* known state, which is not optimal, and is used as a rough estimate, or a "best estimate", before an optimal estimation. innovation diff --git a/doc/en/ref_algorithm_3DVAR.rst b/doc/en/ref_algorithm_3DVAR.rst index 774a753..bfc309a 100644 --- a/doc/en/ref_algorithm_3DVAR.rst +++ b/doc/en/ref_algorithm_3DVAR.rst @@ -178,7 +178,7 @@ The options of the algorithm are the following: Quantiles This list indicates the values of quantile, between 0 and 1, to be estimated by simulation around the optimal state. The sampling uses a multivariate - gaussian random sampling, directed by the *a posteriori* covariance matrix. + Gaussian random sampling, directed by the *a posteriori* covariance matrix. This option is useful only if the supplementary calculation "SimulationQuantiles" has been chosen. The default is a void list. @@ -205,7 +205,7 @@ The options of the algorithm are the following: This key indicates the type of simulation, linear (with the tangent observation operator applied to perturbation increments around the optimal state) or non-linear (with standard observation operator applied to - perturbated states), one want to do for each perturbation. It changes mainly + perturbed states), one want to do for each perturbation. 
It changes mainly the time of each elementary calculation, usually longer in non-linear than in linear. This option is useful only if the supplementary calculation "SimulationQuantiles" has been chosen. The default value is "Linear", and @@ -269,7 +269,7 @@ The conditional outputs of the algorithm are the following: errors diagonal matrix of the optimal state, coming from the :math:`\mathbf{A}*` covariance matrix. - Exemple : ``S = ADD.get("APosterioriStandardDeviations")[-1]`` + Example : ``S = ADD.get("APosterioriStandardDeviations")[-1]`` APosterioriVariances *List of matrices*. Each element is an *a posteriori* error variance @@ -307,9 +307,9 @@ The conditional outputs of the algorithm are the following: CurrentOptimum *List of vectors*. Each element is the optimal state obtained at the current - step of the optimization algorithm. It is not necessarely the last state. + step of the optimization algorithm. It is not necessarily the last state. - Exemple : ``Xo = ADD.get("CurrentOptimum")[:]`` + Example : ``Xo = ADD.get("CurrentOptimum")[:]`` CurrentState *List of vectors*. Each element is a usual state vector used during the @@ -320,9 +320,9 @@ The conditional outputs of the algorithm are the following: IndexOfOptimum *List of integers*. Each element is the iteration index of the optimum obtained at the current step the optimization algorithm. It is not - necessarely the number of the last iteration. + necessarily the number of the last iteration. - Exemple : ``i = ADD.get("IndexOfOptimum")[-1]`` + Example : ``i = ADD.get("IndexOfOptimum")[-1]`` Innovation *List of vectors*. Each element is an innovation vector, which is in static @@ -371,7 +371,7 @@ The conditional outputs of the algorithm are the following: the optimal state obtained at the current step the optimization algorithm, that is, in the observation space. - Exemple : ``hxo = ADD.get("SimulatedObservationAtCurrentOptimum")[-1]`` + Example : ``hxo = ADD.get("SimulatedObservationAtCurrentOptimum")[-1]`` SimulatedObservationAtCurrentState *List of vectors*. Each element is an observed vector at the current state, @@ -404,3 +404,4 @@ Bibliographical references: - [Byrd95]_ - [Morales11]_ - [Talagrand97]_ + - [Zhu97]_ diff --git a/doc/en/ref_algorithm_4DVAR.rst b/doc/en/ref_algorithm_4DVAR.rst index 525931c..1876e6f 100644 --- a/doc/en/ref_algorithm_4DVAR.rst +++ b/doc/en/ref_algorithm_4DVAR.rst @@ -256,9 +256,9 @@ The conditional outputs of the algorithm are the following: CurrentOptimum *List of vectors*. Each element is the optimal state obtained at the current - step of the optimization algorithm. It is not necessarely the last state. + step of the optimization algorithm. It is not necessarily the last state. - Exemple : ``Xo = ADD.get("CurrentOptimum")[:]`` + Example : ``Xo = ADD.get("CurrentOptimum")[:]`` CurrentState *List of vectors*. Each element is a usual state vector used during the @@ -269,9 +269,9 @@ The conditional outputs of the algorithm are the following: IndexOfOptimum *List of integers*. Each element is the iteration index of the optimum obtained at the current step the optimization algorithm. It is not - necessarely the number of the last iteration. + necessarily the number of the last iteration. 
- Exemple : ``i = ADD.get("IndexOfOptimum")[-1]`` + Example : ``i = ADD.get("IndexOfOptimum")[-1]`` See also ++++++++ diff --git a/doc/en/ref_algorithm_AdjointTest.rst b/doc/en/ref_algorithm_AdjointTest.rst index ab6da36..fdb8afb 100644 --- a/doc/en/ref_algorithm_AdjointTest.rst +++ b/doc/en/ref_algorithm_AdjointTest.rst @@ -140,7 +140,7 @@ writing of post-processing procedures, are described in the The unconditional outputs of the algorithm are the following: Residu - *List of values*. Each element is the value of the particular residu + *List of values*. Each element is the value of the particular residue verified during a checking algorithm, in the order of the tests. Example : ``r = ADD.get("Residu")[:]`` diff --git a/doc/en/ref_algorithm_Blue.rst b/doc/en/ref_algorithm_Blue.rst index c3aac7c..4dfecec 100644 --- a/doc/en/ref_algorithm_Blue.rst +++ b/doc/en/ref_algorithm_Blue.rst @@ -39,7 +39,7 @@ even if it sometimes works in "slightly" non-linear cases. One can verify the linearity of the observation operator with the help of the :ref:`section_ref_algorithm_LinearityTest`. -In case of non-linearity, even slightly marked, it will be easily prefered the +In case of non-linearity, even slightly marked, it will be easily preferred the :ref:`section_ref_algorithm_ExtendedBlue` or the :ref:`section_ref_algorithm_3DVAR`. @@ -121,7 +121,7 @@ The options of the algorithm are the following: Quantiles This list indicates the values of quantile, between 0 and 1, to be estimated by simulation around the optimal state. The sampling uses a multivariate - gaussian random sampling, directed by the *a posteriori* covariance matrix. + Gaussian random sampling, directed by the *a posteriori* covariance matrix. This option is useful only if the supplementary calculation "SimulationQuantiles" has been chosen. The default is a void list. @@ -148,7 +148,7 @@ The options of the algorithm are the following: This key indicates the type of simulation, linear (with the tangent observation operator applied to perturbation increments around the optimal state) or non-linear (with standard observation operator applied to - perturbated states), one want to do for each perturbation. It changes mainly + perturbed states), one want to do for each perturbation. It changes mainly the time of each elementary calculation, usually longer in non-linear than in linear. This option is useful only if the supplementary calculation "SimulationQuantiles" has been chosen. The default value is "Linear", and diff --git a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst index 9579eed..c2c5072 100644 --- a/doc/en/ref_algorithm_DerivativeFreeOptimization.rst +++ b/doc/en/ref_algorithm_DerivativeFreeOptimization.rst @@ -36,14 +36,14 @@ Description +++++++++++ This algorithm realizes an estimation of the state of a system by minimization -of a cost function :math:`J` without gradient. It is a method that doesn't use +of a cost function :math:`J` without gradient. It is a method that does not use the derivatives of the cost function. It fall, for example, in the same category than the :ref:`section_ref_algorithm_ParticleSwarmOptimization`. This is an optimization method allowing for global minimum search of a general error function :math:`J` of type :math:`L^1`, :math:`L^2` or :math:`L^{\infty}`, with or without weights. The default error function is the augmented weighted -least squares function, classicaly used in data assimilation. 
+least squares function, classically used in data assimilation. Optional and required commands ++++++++++++++++++++++++++++++ @@ -110,15 +110,15 @@ The options of the algorithm are the following: Minimizer This key allows to choose the optimization minimizer. The default choice is "BOBYQA", and the possible ones are - "BOBYQA" (minimization with or without contraints by quadratic approximation [Powell09]_), - "COBYLA" (minimization with or without contraints by linear approximation [Powell94]_ [Powell98]_). - "NEWUOA" (minimization with or without contraints by iterative quadratic approximation [Powell04]_), + "BOBYQA" (minimization with or without constraints by quadratic approximation [Powell09]_), + "COBYLA" (minimization with or without constraints by linear approximation [Powell94]_ [Powell98]_). + "NEWUOA" (minimization with or without constraints by iterative quadratic approximation [Powell04]_), "POWELL" (minimization unconstrained using conjugate directions [Powell64]_), - "SIMPLEX" (minimization with or without contraints using Nelder-Mead simplex algorithm [Nelder65]_), - "SUBPLEX" (minimization with or without contraints using Nelder-Mead on a sequence of subspaces [Rowan90]_). + "SIMPLEX" (minimization with or without constraints using Nelder-Mead simplex algorithm [Nelder65]_), + "SUBPLEX" (minimization with or without constraints using Nelder-Mead on a sequence of subspaces [Rowan90]_). Remark: the "POWELL" method perform a dual outer/inner loops optimization, leading then to less control on the cost function evaluation number because - it is the outer loop limit than is controled. If precise control on this + it is the outer loop limit than is controlled. If precise control on this cost function evaluation number is required, choose an another minimizer. Example : ``{"Minimizer":"BOBYQA"}`` diff --git a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst index ca3a208..d6d5da9 100644 --- a/doc/en/ref_algorithm_ExtendedKalmanFilter.rst +++ b/doc/en/ref_algorithm_ExtendedKalmanFilter.rst @@ -205,7 +205,7 @@ The conditional outputs of the algorithm are the following: the difference between the optimal and the background, and in dynamic the evolution increment. - Exemple : ``d = ADD.get("Innovation")[-1]`` + Example : ``d = ADD.get("Innovation")[-1]`` See also ++++++++ diff --git a/doc/en/ref_algorithm_GradientTest.rst b/doc/en/ref_algorithm_GradientTest.rst index 6d0dd8f..5c34f91 100644 --- a/doc/en/ref_algorithm_GradientTest.rst +++ b/doc/en/ref_algorithm_GradientTest.rst @@ -64,18 +64,18 @@ function, with respect to the :math:`\alpha` parameter to the square: .. math:: R(\alpha) = \frac{|| F(\mathbf{x}+\alpha*\mathbf{dx}) - F(\mathbf{x}) - \alpha * \nabla_xF(\mathbf{dx}) ||}{\alpha^2} -This is a residue essentialy similar to the classical Taylor criterion -previously described, but its behaviour can differ depending on the numerical +This is a residue essentially similar to the classical Taylor criterion +previously described, but its behavior can differ depending on the numerical properties of the calculation. If the residue is constant until a certain level after which the residue will growth, it signifies that the gradient is well calculated until this stopping precision, and that :math:`F` is not linear. 
-If the residue is systematicaly growing from a very smal value with respect to +If the residue is systematically growing from a very small value with respect to :math:`||F(\mathbf{x})||`, it signifies that :math:`F` is (quasi-)linear and that the gradient calculation is correct until the precision for which the -residue reachs the numerical order of :math:`||F(\mathbf{x})||`. +residue reaches the numerical order of :math:`||F(\mathbf{x})||`. "Norm" residue ************** @@ -156,7 +156,7 @@ The options of the algorithm are the following: default choice is "Taylor", and the possible ones are "Taylor" (normalized residue of the Taylor development of the operator, which has to decrease with the square power of the perturbation), "TaylorOnNorm" (residue of the - Taylor development of the operator with respect to the pertibation to the + Taylor development of the operator with respect to the perturbation to the square, which has to remain constant) and "Norm" (residue obtained by taking the norm of the Taylor development at zero order approximation, which approximate the gradient, and which has to remain constant). @@ -195,7 +195,7 @@ writing of post-processing procedures, are described in the The unconditional outputs of the algorithm are the following: Residu - *List of values*. Each element is the value of the particular residu + *List of values*. Each element is the value of the particular residue verified during a checking algorithm, in the order of the tests. Example : ``r = ADD.get("Residu")[:]`` diff --git a/doc/en/ref_algorithm_KalmanFilter.rst b/doc/en/ref_algorithm_KalmanFilter.rst index 8bc353e..447cc89 100644 --- a/doc/en/ref_algorithm_KalmanFilter.rst +++ b/doc/en/ref_algorithm_KalmanFilter.rst @@ -197,7 +197,7 @@ The conditional outputs of the algorithm are the following: the difference between the optimal and the background, and in dynamic the evolution increment. - Exemple : ``d = ADD.get("Innovation")[-1]`` + Example : ``d = ADD.get("Innovation")[-1]`` See also ++++++++ diff --git a/doc/en/ref_algorithm_LinearityTest.rst b/doc/en/ref_algorithm_LinearityTest.rst index f3c0a5d..788519a 100644 --- a/doc/en/ref_algorithm_LinearityTest.rst +++ b/doc/en/ref_algorithm_LinearityTest.rst @@ -220,7 +220,7 @@ writing of post-processing procedures, are described in the The unconditional outputs of the algorithm are the following: Residu - *List of values*. Each element is the value of the particular residu + *List of values*. Each element is the value of the particular residue verified during a checking algorithm, in the order of the tests. Example : ``r = ADD.get("Residu")[:]`` diff --git a/doc/en/ref_algorithm_NonLinearLeastSquares.rst b/doc/en/ref_algorithm_NonLinearLeastSquares.rst index f802191..c2af40a 100644 --- a/doc/en/ref_algorithm_NonLinearLeastSquares.rst +++ b/doc/en/ref_algorithm_NonLinearLeastSquares.rst @@ -40,7 +40,7 @@ part. The background, required in the interface, is only used as an initial point for the variational minimization. In all cases, it is recommended to prefer the :ref:`section_ref_algorithm_3DVAR` -for its stability as for its behaviour during optimization. +for its stability as for its behavior during optimization. 
Optional and required commands ++++++++++++++++++++++++++++++ @@ -258,3 +258,4 @@ References to other sections: Bibliographical references: - [Byrd95]_ - [Morales11]_ + - [Zhu97]_ diff --git a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst index aa6f918..b6ce80b 100644 --- a/doc/en/ref_algorithm_ParticleSwarmOptimization.rst +++ b/doc/en/ref_algorithm_ParticleSwarmOptimization.rst @@ -32,13 +32,13 @@ Description This algorithm realizes an estimation of the state of a dynamic system by minimization of a cost function :math:`J` by using a particle swarm. It is a -method that doesn't use the derivatives of the cost function. It fall in the +method that does not use the derivatives of the cost function. It fall in the same category than the :ref:`section_ref_algorithm_DerivativeFreeOptimization`. This is an optimization method allowing for global minimum search of a general error function :math:`J` of type :math:`L^1`, :math:`L^2` or :math:`L^{\infty}`, with or without weights. The default error function is the augmented weighted -least squares function, classicaly used in data assimilation. +least squares function, classically used in data assimilation. Optional and required commands ++++++++++++++++++++++++++++++ diff --git a/doc/en/ref_algorithm_SamplingTest.rst b/doc/en/ref_algorithm_SamplingTest.rst index dec8206..15b8b4c 100644 --- a/doc/en/ref_algorithm_SamplingTest.rst +++ b/doc/en/ref_algorithm_SamplingTest.rst @@ -34,7 +34,7 @@ This algorithm allows to calculate the values, linked to a :math:`\mathbf{x}` state, of a general error function :math:`J` of type :math:`L^1`, :math:`L^2` or :math:`L^{\infty}`, with or without weights, and of the observation operator, for an priori given states sample. The default error function is the augmented -weighted least squares function, classicaly used in data assimilation. +weighted least squares function, classically used in data assimilation. It is useful to test the sensitivity, of the error function :math:`J`, in particular, to the state :math:`\mathbf{x}` variations. When a state is not @@ -49,8 +49,8 @@ To be visible by the user, the results of sampling has to be explicitly asked for. One use for that, on the desired variable, the final saving through "*UserPostAnalysis*" or the treatment during the calculation by "*observer*". -To perform distributed or more complex sampling, see other modules available in -SALOME : PARAMETRIC or OPENTURNS. +To perform distributed or more complex sampling, see OPENTURNS module available +in SALOME. Optional and required commands ++++++++++++++++++++++++++++++ @@ -138,10 +138,10 @@ The options of the algorithm are the following: SampleAsIndependantRandomVariables This key describes the calculations points as an hyper-cube, for which the - points on each axis come from a independant random sampling of the axis + points on each axis come from a independent random sampling of the axis variable, under the specification of the distribution, its parameters and the number of points in the sample, as a list ``['distribution', - [parametres], nombre]`` for each axis. The possible distributions are + [parameters], number]`` for each axis. The possible distributions are 'normal' of parameters (mean,std), 'lognormal' of parameters (mean,sigma), 'uniform' of parameters (low,high), or 'weibull' of parameter (shape). That is then a list of the same size than the one of the state. 
@@ -240,5 +240,4 @@ References to other sections: - :ref:`section_ref_algorithm_FunctionTest` References to other SALOME modules: - - PARAMETRIC, see the *User guide of PARAMETRIC module* in the main "*Help*" menu of SALOME platform - OPENTURNS, see the *User guide of OPENTURNS module* in the main "*Help*" menu of SALOME platform diff --git a/doc/en/ref_algorithm_TangentTest.rst b/doc/en/ref_algorithm_TangentTest.rst index c92444f..16948e1 100644 --- a/doc/en/ref_algorithm_TangentTest.rst +++ b/doc/en/ref_algorithm_TangentTest.rst @@ -148,7 +148,7 @@ writing of post-processing procedures, are described in the The unconditional outputs of the algorithm are the following: Residu - *List of values*. Each element is the value of the particular residu + *List of values*. Each element is the value of the particular residue verified during a checking algorithm, in the order of the tests. Example : ``r = ADD.get("Residu")[:]`` diff --git a/doc/en/ref_assimilation_keywords.rst b/doc/en/ref_assimilation_keywords.rst index c0c906b..89794ac 100644 --- a/doc/en/ref_assimilation_keywords.rst +++ b/doc/en/ref_assimilation_keywords.rst @@ -66,7 +66,7 @@ The different commands are the following: algorithm chosen by the keyword "*Algorithm*", and its potential optional parameters. The algorithm choices are available through the GUI. There exists for example "3DVAR", "Blue"... Each algorithm is defined, below, by a - specific subsection. Optionaly, the command allows also to add some + specific subsection. Optionally, the command allows also to add some parameters to control the algorithm. Their values are defined either explicitly or in a "*Dict*" type object. See the :ref:`section_ref_options_Algorithm_Parameters` for the detailed use of this diff --git a/doc/en/ref_covariance_requirements.rst b/doc/en/ref_covariance_requirements.rst index 841ea71..11d248d 100644 --- a/doc/en/ref_covariance_requirements.rst +++ b/doc/en/ref_covariance_requirements.rst @@ -32,7 +32,7 @@ Requirements to describe covariance matrices .. index:: single: covariances In general, a covariance matrix (or a variance-covariance matrix) has to be -squared, symetric, semi-definite positive. Each of its terms describes the +squared, symmetric, semi-definite positive. Each of its terms describes the covariance of the two random variables corresponding to its position in the matrix. The normalized form of the covariance is the linear correlation. One can express the following relation, between a covariance matrix :math:`\mathbf{M}` diff --git a/doc/en/ref_observers_requirements.rst b/doc/en/ref_observers_requirements.rst index f6d67bd..7c6eda3 100644 --- a/doc/en/ref_observers_requirements.rst +++ b/doc/en/ref_observers_requirements.rst @@ -50,7 +50,7 @@ The "*observer*" can be given as a explicit script (entry of type "*String*"), as a script in an external file (entry of type "*Script*"), or by using a template or pattern (entry of type"*Template*") available by default in ADAO when using the graphical editor. These templates are simple scripts that can be -tuned by the user, either in the integrated edtition stage of the case, or in +tuned by the user, either in the integrated edition stage of the case, or in the edition stage of the schema before execution, to improve the ADAO case performance in the SALOME execution supervisor. @@ -108,7 +108,7 @@ Print on standard output the current value of the variable, adding its index. Template **ValueSeriePrinter** : ................................ 
-Print on standard output the value serie of the variable. +Print on standard output the value series of the variable. :: @@ -140,7 +140,7 @@ Save the current value of the variable in a file of the '/tmp' directory named ' Template **ValueSerieSaver** : .............................. -Save the value serie of the variable in a file of the '/tmp' directory named 'value...txt' from the variable name and the saving step. +Save the value series of the variable in a file of the '/tmp' directory named 'value...txt' from the variable name and the saving step. :: @@ -205,7 +205,7 @@ Print on standard output and, in the same time save in a file, the current value Template **ValueSeriePrinterAndSaver** : ........................................ -Print on standard output and, in the same time, save in a file the value serie of the variable. +Print on standard output and, in the same time, save in a file the value series of the variable. :: @@ -249,7 +249,7 @@ Graphically plot with Gnuplot the current value of the variable. Template **ValueSerieGnuPlotter** : ................................... -Graphically plot with Gnuplot the value serie of the variable. +Graphically plot with Gnuplot the value series of the variable. :: @@ -294,7 +294,7 @@ Print on standard output and, in the same time, graphically plot with Gnuplot th Template **ValueSeriePrinterAndGnuPlotter** : ............................................. -Print on standard output and, in the same time, graphically plot with Gnuplot the value serie of the variable. +Print on standard output and, in the same time, graphically plot with Gnuplot the value series of the variable. :: @@ -350,7 +350,7 @@ Print on standard output and, in the same, time save in a file and graphically p Template **ValueSeriePrinterSaverAndGnuPlotter** : .................................................. -Print on standard output and, in the same, time save in a file and graphically plot the value serie of the variable. +Print on standard output and, in the same, time save in a file and graphically plot the value series of the variable. :: diff --git a/doc/en/ref_operator_requirements.rst b/doc/en/ref_operator_requirements.rst index fb3b6c5..86e98d6 100644 --- a/doc/en/ref_operator_requirements.rst +++ b/doc/en/ref_operator_requirements.rst @@ -115,7 +115,7 @@ implementation. One can then use the "*FunctionTest*" ADAO checking algorithm test. **Important warning:** the name "*DirectOperator*" is mandatory, and the type of -the ``X`` argument can be either a list, a numpy array or a numpy 1D-matrix. The +the ``X`` argument can be either a list, a Numpy array or a Numpy 1D-matrix. The user function has to accept and treat all these cases. Second functional form: using "*ScriptWithFunctions*" @@ -169,7 +169,7 @@ return the associated matrix. **Important warning:** the names "*DirectOperator*", "*TangentOperator*" and "*AdjointOperator*" are mandatory, and the type of the ``X``, Y``, ``dX`` -arguments can be either a python list, a numpy array or a numpy 1D-matrix. The +arguments can be either a python list, a Numpy array or a Numpy 1D-matrix. The user has to treat these cases in his script. Third functional form: using "*ScriptWithSwitch*" @@ -276,19 +276,18 @@ In such a case with explicit control, only the second functional form (using "*ScriptWithFunctions*") and third functional form (using "*ScriptWithSwitch*") can be used. 
-Additional notes on nondimensionalization of operators -++++++++++++++++++++++++++++++++++++++++++++++++++++++ +Additional notes on dimensionless transformation of operators ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. index:: single: Nondimensionalization .. index:: single: Dimensionless It is common that physical quantities, in input or output of the operators, have significant differences in magnitude or rate of change. One way to avoid -numerical difficulties is to use, or to set, a nondimensionalization of the +numerical difficulties is to use, or to set, a dimensionless version of calculations carried out in operators [WikipediaND]_. In principle, since -physical simulation should be as dimensionless as possible, it is firstly -recommended to use the existing capacity of nondimensionalization of the -calculation code. +physical simulation should be as dimensionless as possible, it is at first +recommended to use the existing dimensionless capacity of the calculation code. However, in the common case where we can not dispose of it, it is often useful to surround the calculation to remove dimension for input or output. A simple @@ -301,8 +300,8 @@ component of :math:`\mathbf{x}^b` is non zero, one can indeed put: and then optimize the multiplicative parameter :math:`\mathbf{\alpha}`. This parameter has as default value (or as background) a vector of 1. Be careful, -applying a process of nondimensionalization also requires changing the error -covariances associated in an ADAO formulation of the optimization problem. +applying a process of dimensionless transformation also requires changing the +associated error covariances in an ADAO formulation of the optimization problem. Such a process is rarely enough to avoid all the numerical problems, but it often improves a lot the numeric conditioning of the optimization. diff --git a/doc/en/ref_options_AlgorithmParameters.rst b/doc/en/ref_options_AlgorithmParameters.rst index 88a6210..4cedea8 100644 --- a/doc/en/ref_options_AlgorithmParameters.rst +++ b/doc/en/ref_options_AlgorithmParameters.rst @@ -40,7 +40,7 @@ is determined as follows in the graphical user interface: #. then secondly, only in the "*Dict*" case of "*Parameters*", by the included keyword "*FROM*" which allows to choose between a string entry and a Python script file entry. If an option or a parameter is specified by the user for an algorithm that -doesn't support it, the option is simply left unused and don't stop the +does not support it, the option is simply left unused and don't stop the treatment. The meaning of the acronyms or particular names can be found in the :ref:`genindex` or the :ref:`section_glossary`. @@ -64,8 +64,8 @@ is selected by the user. One can then modify its value, or fill it in list cases for example. It is the recommended way to modify only some algorithmic parameters in a safe -way. This method allows only to define authorized paramters for a given -algorithm, and the defined values are not keeped if the user changes the +way. This method allows only to define authorized parameters for a given +algorithm, and the defined values are not kept if the user changes the algorithm. Second method : using a string in the graphical interface diff --git a/doc/en/ref_output_variables.rst b/doc/en/ref_output_variables.rst index 7cc27a1..88cc227 100644 --- a/doc/en/ref_output_variables.rst +++ b/doc/en/ref_output_variables.rst @@ -75,13 +75,13 @@ Examples of Python scripts to obtain or treat the outputs .. 
index:: single: AnalysisPrinterAndSaver These examples present Python commands or scripts which allow to obtain or to -treat the ouput of an algorithm run. To help the user, they are directly +treat the output of an algorithm run. To help the user, they are directly available in the user interface, when building the ADAO case in the embedded case editor, in the "*Template*" type fields. In an equivalent way, these commands can be integrated in an external user script (and inserted in the ADAO case by a "*Script*" type input) or can exist as a string, including line feeds (and inserted in the ADAO case by a "*String*" type input). Lot of variants can -be build from these simple examples, the main objective beeing to help the user +be build from these simple examples, the main objective being to help the user to elaborate the exact procedure he needs in output. The first example (named "*AnalysisPrinter*" in the inputs of type @@ -94,7 +94,7 @@ analysis or the optimal state, noted as :math:`\mathbf{x}^a` in the section print 'Analysis:',xa" The ``numpy.ravel`` function is here to be sure that the ``xa`` variable will -contain a real unidimensional vector, whatever the previoux computing choices +contain a real unidimensional vector, whatever the previous computing choices are. A second example (named "*AnalysisSaver*" in the inputs of type "*Template*") @@ -124,7 +124,7 @@ value of :math:`\mathbf{x}^a`. It is realized by the commands:: To facilitate these examples extension for user needs, we recall that all the SALOME functions are available at the same level than these commands. The user can for example request for graphical representation with the PARAVIS [#]_ or -other modules, for computating operations driven by YACS [#]_ or an another +other modules, for computing operations driven by YACS [#]_ or an another module, etc. Other usage examples are also given for :ref:`section_u_step4` of the @@ -287,7 +287,7 @@ of availability. They are the following, in alphabetical order: CurrentOptimum *List of vectors*. Each element is the optimal state obtained at the current - step of the optimization algorithm. It is not necessarely the last state. + step of the optimization algorithm. It is not necessarily the last state. Example : ``Xo = ADD.get("CurrentOptimum")[:]`` @@ -300,7 +300,7 @@ of availability. They are the following, in alphabetical order: IndexOfOptimum *List of integers*. Each element is the iteration index of the optimum obtained at the current step the optimization algorithm. It is not - necessarely the number of the last iteration. + necessarily the number of the last iteration. Example : ``i = ADD.get("MahalanobisConsistency")[-1]`` diff --git a/doc/en/theory.rst b/doc/en/theory.rst index 002f176..c77b29c 100644 --- a/doc/en/theory.rst +++ b/doc/en/theory.rst @@ -321,7 +321,7 @@ classical gradient optimization. But other measures of errors can be more adapted to real physical simulation problems. Then, **an another way to extend estimation possibilities is to use other measures of errors to be reduced**. For example, we can cite *absolute error value*, *maximum error value*, etc. These -error measures are not differentiables, but some optimization methods can deal +error measures are not differentiable, but some optimization methods can deal with: heuristics and meta-heuristics for real-valued problem, etc. As previously, the main drawback remain a greater numerical cost to find state estimates, and no guarantee of convergence in finite time. 
Here again, we only diff --git a/doc/en/tui.rst b/doc/en/tui.rst index ccd819d..df82a5f 100644 --- a/doc/en/tui.rst +++ b/doc/en/tui.rst @@ -79,7 +79,7 @@ More details are given here on the successive steps of the setup of an ADAO TUI calculation case. The commands themselves are detailed just after in the :ref:`subsection_tui_commands`. -The creation and initialisation of a study are done using the following +The creation and initialization of a study are done using the following commands, the ``case`` object name of the ADAO TUI calculation case being let free to the user choice:: @@ -267,7 +267,7 @@ according to the second syntax. Creating a calculation case in TUI text interface +++++++++++++++++++++++++++++++++++++++++++++++++ -The creation and the initialisation of a calculation case in TUI text interface +The creation and the initialization of a calculation case in TUI text interface are done by importing the interface module "*adaoBuilder*" and by by invoking its method "*New()*" as illustrated in the following lines (the ``case`` object name being let free to the user choice):: @@ -641,12 +641,6 @@ parameters that were used to artificially build the observations. .. Utilisation de fonctions de surveillance de type "observer" .. +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -.. Suivre d'un recalage à l'aide de MatPlotLib -.. +++++++++++++++++++++++++++++++++++++++++++ - -.. Equivalences entre l'interface graphique (GUI) et l'interface textuelle (TUI) -.. ----------------------------------------------------------------------------- - .. [HOMARD] For more information on HOMARD, see the *HOMARD module* and its integrated help available from the main menu *Help* of the SALOME platform. .. [PARAVIS] For more information on PARAVIS, see the *PARAVIS module* and its integrated help available from the main menu *Help* of the SALOME platform. diff --git a/doc/en/using.rst b/doc/en/using.rst index b81e9df..390fa6e 100644 --- a/doc/en/using.rst +++ b/doc/en/using.rst @@ -242,7 +242,7 @@ of the following form:: Entering in the assimilation study Name is set to........: Test Algorithm is set to...: Blue - Launching the analyse + Launching the analysis Optimal state: [0.5, 0.5, 0.5] diff --git a/doc/fr/bibliography.rst b/doc/fr/bibliography.rst index 3fa58ad..b178ab6 100644 --- a/doc/fr/bibliography.rst +++ b/doc/fr/bibliography.rst @@ -100,4 +100,4 @@ Bibliographie *Nota Bene* : un lien vers la version française de chaque page Wikipédia se trouve dans le sous-menu "*Languages*". Les deux versions sont complémentaires car, même si souvent le contenu en anglais est plus fourni, les informations -disponibles dans chaque langue ne sont pas toujours identiques. +disponibles dans chaque langue ne sont pas identiques. diff --git a/doc/fr/examples.rst b/doc/fr/examples.rst index 57c8522..c4bef6f 100644 --- a/doc/fr/examples.rst +++ b/doc/fr/examples.rst @@ -342,7 +342,7 @@ dictionnaire sont optionnelles, elles disposent toutes d'une valeur par d et elles peuvent être présentes sans être utiles. Par exemple:: AlgorithmParameters = { - "Minimizer" : "CG", # Choix possible : "LBFGSB", "TNC", "CG", "BFGS" + "Minimizer" : "LBFGSB", # Recommended "MaximumNumberOfSteps" : 10, } @@ -350,7 +350,8 @@ Si aucune borne n'est requise sur les variables de contr choisir les algorithmes de minimisation "*BFGS*" ou "*CG*" pour tous les algorithmes variationnels d'assimilation de données ou d'optimisation. 
Pour l'optimisation sous contraintes, l'algorithme "*LBFGSB*" est bien souvent plus -robuste, mais le "*TNC*" est parfois plus performant. +robuste, mais le "*TNC*" est parfois plus performant. De manière générale, +le choix de l'algorithme "*LBFGSB*" est recommandé. Ensuite le script peut être ajouté au cas ADAO, dans une entrée de type fichier pour le mot-clé "*AlgorithmParameters*", de la manière suivante: @@ -428,7 +429,7 @@ l' et, avec un écart-type arbitraire de 1% sur chaque composante de l'erreur:: - R = 0.0001 * diagonal( lenght(Yo) ) + R = 0.0001 * diagonal( length(Yo) ) Toutes les informations requises pour l'estimation par assimilation de données sont maintenant définies. @@ -576,7 +577,7 @@ d # Creating the required ADAO variable # ----------------------------------- AlgorithmParameters = { - "Minimizer" : "TNC", # Possible : "LBFGSB", "TNC", "CG", "BFGS" + "Minimizer" : "LBFGSB", # Recommended "MaximumNumberOfSteps" : 15, # Number of global iterative steps "Bounds" : [ [ None, None ], # Bound on the first parameter diff --git a/doc/fr/ref_algorithm_3DVAR.rst b/doc/fr/ref_algorithm_3DVAR.rst index 53b3721..25bbc6a 100644 --- a/doc/fr/ref_algorithm_3DVAR.rst +++ b/doc/fr/ref_algorithm_3DVAR.rst @@ -413,3 +413,4 @@ R - [Byrd95]_ - [Morales11]_ - [Talagrand97]_ + - [Zhu97]_ diff --git a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst index 2a13572..71e9afd 100644 --- a/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst +++ b/doc/fr/ref_algorithm_ExtendedKalmanFilter.rst @@ -105,7 +105,7 @@ Les options de l'algorithme sont les suivantes: ConstrainedBy Cette clé permet d'indiquer la méthode de prise en compte des contraintes de - bornes. La seule disponible est "EstimateProjection", qui projete + bornes. La seule disponible est "EstimateProjection", qui projette l'estimation de l'état courant sur les contraintes de bornes. Exemple : ``{"ConstrainedBy":"EstimateProjection"}`` diff --git a/doc/fr/ref_algorithm_GradientTest.rst b/doc/fr/ref_algorithm_GradientTest.rst index 6de7283..113808d 100644 --- a/doc/fr/ref_algorithm_GradientTest.rst +++ b/doc/fr/ref_algorithm_GradientTest.rst @@ -45,14 +45,14 @@ normalis .. math:: R(\alpha) = \frac{|| F(\mathbf{x}+\alpha*\mathbf{dx}) - F(\mathbf{x}) - \alpha * \nabla_xF(\mathbf{dx}) ||}{|| F(\mathbf{x}) ||} -Si le résidu décroit et que la décroissance se fait en :math:`\alpha^2` selon +Si le résidu décroît et que la décroissance se fait en :math:`\alpha^2` selon :math:`\alpha`, cela signifie que le gradient est bien calculé jusqu'à la précision d'arrêt de la décroissance quadratique, et que :math:`F` n'est pas linéaire. -Si le résidu décroit et que la décroissance se fait en :math:`\alpha` selon +Si le résidu décroît et que la décroissance se fait en :math:`\alpha` selon :math:`\alpha`, jusqu'à un certain seuil après lequel le résidu est faible et -constant, cela signifie que :math:`F` est linéaire et que le résidu décroit à +constant, cela signifie que :math:`F` est linéaire et que le résidu décroît à partir de l'erreur faite dans le calcul du terme :math:`\nabla_xF`. Résidu "TaylorOnNorm" diff --git a/doc/fr/ref_algorithm_LinearityTest.rst b/doc/fr/ref_algorithm_LinearityTest.rst index c65ea2a..587c797 100644 --- a/doc/fr/ref_algorithm_LinearityTest.rst +++ b/doc/fr/ref_algorithm_LinearityTest.rst @@ -53,7 +53,7 @@ Si le r faible qu'à partir d'un certain ordre d'incrément, l'hypothèse de linéarité de :math:`F` n'est pas vérifiée. 
-Si le résidu décroit et que la décroissance se fait en :math:`\alpha^2` selon +Si le résidu décroît et que la décroissance se fait en :math:`\alpha^2` selon :math:`\alpha`, cela signifie que le gradient est bien calculé jusqu'au niveau d'arrêt de la décroissance quadratique. @@ -72,7 +72,7 @@ Si le r faible qu'à partir d'un certain ordre d'incrément, l'hypothèse de linéarité de :math:`F` n'est pas vérifiée. -Si le résidu décroit et que la décroissance se fait en :math:`\alpha^2` selon +Si le résidu décroît et que la décroissance se fait en :math:`\alpha^2` selon :math:`\alpha`, cela signifie que le gradient est bien calculé jusqu'au niveau d'arrêt de la décroissance quadratique. diff --git a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst index 52aa54d..b59ac57 100644 --- a/doc/fr/ref_algorithm_NonLinearLeastSquares.rst +++ b/doc/fr/ref_algorithm_NonLinearLeastSquares.rst @@ -35,7 +35,7 @@ la fonctionnelle :math:`J` d' .. math:: J(\mathbf{x})=(\mathbf{y}^o-\mathbf{H}.\mathbf{x})^T.\mathbf{R}^{-1}.(\mathbf{y}^o-\mathbf{H}.\mathbf{x}) -Il est similaire à l':ref:`section_ref_algorithm_3DVAR` amputé de sa partie +Il est similaire à l':ref:`section_ref_algorithm_3DVAR` privé de sa partie ébauche. L'ébauche, requise dans l'interface, ne sert que de point initial pour la minimisation variationnelle. @@ -268,3 +268,4 @@ R Références bibliographiques : - [Byrd95]_ - [Morales11]_ + - [Zhu97]_ diff --git a/doc/fr/ref_algorithm_SamplingTest.rst b/doc/fr/ref_algorithm_SamplingTest.rst index 8e7764e..bc9e319 100644 --- a/doc/fr/ref_algorithm_SamplingTest.rst +++ b/doc/fr/ref_algorithm_SamplingTest.rst @@ -51,8 +51,8 @@ Pour appara sauvegarde finale à l'aide du mot-clé "*UserPostAnalysis*" ou le traitement en cours de calcul à l'aide des "*observer*" adaptés. -Pour effectuer un échantillonnage distribué ou plus complexe, voir d'autres -modules disponibles dans SALOME : PARAMETRIC ou OPENTURNS. +Pour effectuer un échantillonnage distribué ou plus complexe, voir le module +OPENTURNS disponible dans SALOME. Commandes requises et optionnelles ++++++++++++++++++++++++++++++++++ @@ -249,5 +249,4 @@ R - :ref:`section_ref_algorithm_FunctionTest` Références vers d'autres modules SALOME : - - PARAMETRIC, voir le *Guide utilisateur du module PARAMETRIC* dans le menu principal *Aide* de l'environnement SALOME - OPENTURNS, voir le *Guide utilisateur du module OPENTURNS* dans le menu principal *Aide* de l'environnement SALOME diff --git a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst index 2f9f74f..e0e82cd 100644 --- a/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst +++ b/doc/fr/ref_algorithm_UnscentedKalmanFilter.rst @@ -110,7 +110,7 @@ Les options de l'algorithme sont les suivantes: ConstrainedBy Cette clé permet d'indiquer la méthode de prise en compte des contraintes de - bornes. La seule disponible est "EstimateProjection", qui projete + bornes. La seule disponible est "EstimateProjection", qui projette l'estimation de l'état courant sur les contraintes de bornes. Exemple : ``{"ConstrainedBy":"EstimateProjection"}`` diff --git a/doc/fr/ref_entry_types.rst b/doc/fr/ref_entry_types.rst index f22e5ca..a8d4d90 100644 --- a/doc/fr/ref_entry_types.rst +++ b/doc/fr/ref_entry_types.rst @@ -48,7 +48,7 @@ d par exemple "1 2 ; 3 4" ou "[[1,2],[3,4]]" pour une matrice carrée de taille 2x2. -Les différents autres pseudo-types sont les suivants. Les variables auquelles +Les différents autres pseudo-types sont les suivants. 
Les variables auxquelles ils s'appliquent peuvent elles-mêmes être données soit par une chaîne de caractères (un "*String*"), soit par un fichier script (un "*Script*"): diff --git a/doc/fr/theory.rst b/doc/fr/theory.rst index e46dde5..ebe3bbc 100644 --- a/doc/fr/theory.rst +++ b/doc/fr/theory.rst @@ -153,7 +153,7 @@ Description simple du cadre m On peut décrire ces démarches de manière simple. Par défaut, toutes les variables sont des vecteurs, puisqu'il y a plusieurs paramètres à ajuster, ou un -champ discretisé à reconstruire. +champ discrétisé à reconstruire. Selon les notations standards en assimilation de données, on note :math:`\mathbf{x}^a` les paramètres optimaux qui doivent être déterminés par diff --git a/doc/fr/tui.rst b/doc/fr/tui.rst index 4c87fe3..1684f61 100644 --- a/doc/fr/tui.rst +++ b/doc/fr/tui.rst @@ -564,7 +564,7 @@ Les hypoth #. que l'on veut recaler 3 paramètres ``alpha``, ``beta`` et ``gamma`` dans un domaine borné, #. que l'on dispose d'observations nommées ``observations``, -#. que l'utilisateur dispose en Python d'une fonction de simulation physique appellée ``simulation``, préalablement (bien) testée, qui transforme les 3 paramètres en résultats similaires aux observations, +#. que l'utilisateur dispose en Python d'une fonction de simulation physique appelée ``simulation``, préalablement (bien) testée, qui transforme les 3 paramètres en résultats similaires aux observations, #. que l'exploitation indépendante, que l'utilisateur veut faire, est représentée ici par l'affichage simple de l'état initial, de l'état optimal, de la simulation en ce point, des états intermédiaires et du nombre d'itérations d'optimisation. Pour effectuer de manière simple cet essai de cas de calcul TUI, on se donne par @@ -665,7 +665,7 @@ L'ex Etat optimal...................: [ 2. 3. 4.] Simulation à l'état optimal....: [ 2. 6. 12. 20.] -Comme il se doit en expériences jumelles, on constate que l'on retouve bien les +Comme il se doit en expériences jumelles, on constate que l'on retrouve bien les paramètres qui ont servi à construire artificiellement les observations. .. Réconciliation de courbes à l'aide de MedCoupling @@ -674,12 +674,6 @@ param .. Utilisation de fonctions de surveillance de type "observer" .. +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -.. Suivre d'un recalage à l'aide de MatPlotLib -.. +++++++++++++++++++++++++++++++++++++++++++ - -.. Equivalences entre l'interface graphique (GUI) et l'interface textuelle (TUI) -.. ----------------------------------------------------------------------------- - .. [HOMARD] Pour de plus amples informations sur HOMARD, voir le *module HOMARD* et son aide intégrée disponible dans le menu principal *Aide* de l'environnement SALOME. .. [PARAVIS] Pour de plus amples informations sur PARAVIS, voir le *module PARAVIS* et son aide intégrée disponible dans le menu principal *Aide* de l'environnement SALOME. diff --git a/doc/fr/using.rst b/doc/fr/using.rst index adcbab0..f2dc368 100644 --- a/doc/fr/using.rst +++ b/doc/fr/using.rst @@ -146,7 +146,7 @@ Au final, il faut enregistrer le cas ADAO en utilisant le bouton "*Enregistrer*" choisissant l'entrée "*Enregistrer/ Enregistrer sous*" dans le menu "*ADAO*". Il est alors demandé un emplacement, à choisir dans l'arborescence des fichiers, et un nom, qui sera complété par l'extension "*.comm*" utilisée pour les fichiers -de l'éditeur intégre de cas. Cette action va générer une paire de fichiers +de l'éditeur intégré de cas. 
Cette action va générer une paire de fichiers décrivant le cas ADAO, avec le même nom de base, le premier présentant une extension "*.comm*" et le second une extension "*.py*" [#]_. @@ -202,7 +202,7 @@ capacit description complète de ces éléments. En pratique, le schéma YACS dispose d'un port de sortie "*algoResults*" dans le -bloc de calcul, qui donne accès à un objet structuré nommé ci-aprés "*ADD*" par +bloc de calcul, qui donne accès à un objet structuré nommé ci-après "*ADD*" par exemple, qui contient tous les résultats de calcul. Ces résultats peuvent être obtenus en récupérant les variables nommées stockées au cours des calculs. L'information principale est la variable "*Analysis*", qui peut être obtenue par @@ -255,7 +255,7 @@ simple ci-dessus est de la forme suivante:: Entering in the assimilation study Name is set to........: Test Algorithm is set to...: Blue - Launching the analyse + Launching the analysis Optimal state: [0.5, 0.5, 0.5] diff --git a/src/daComposant/daCore/Templates.py b/src/daComposant/daCore/Templates.py index 9074cb6..fa3bb96 100644 --- a/src/daComposant/daCore/Templates.py +++ b/src/daComposant/daCore/Templates.py @@ -110,7 +110,7 @@ ObserverTemplates.store( name = "ValueSeriePrinter", content = """print info, var[:]""", fr_FR = "Imprime sur la sortie standard la série des valeurs de la variable", - en_EN = "Print on standard output the value serie of the variable", + en_EN = "Print on standard output the value series of the variable", order = "next", ) ObserverTemplates.store( @@ -124,7 +124,7 @@ ObserverTemplates.store( name = "ValueSerieSaver", content = """import numpy, re\nv=numpy.array(var[:], ndmin=1)\nglobal istep\ntry:\n istep += 1\nexcept:\n istep = 0\nf='/tmp/value_%s_%05i.txt'%(info,istep)\nf=re.sub('\\s','_',f)\nprint 'Value saved in \"%s\"'%f\nnumpy.savetxt(f,v)""", fr_FR = "Enregistre la série des valeurs de la variable dans un fichier du répertoire '/tmp' nommé 'value...txt' selon le nom de la variable et l'étape", - en_EN = "Save the value serie of the variable in a file of the '/tmp' directory named 'value...txt' from the variable name and the saving step", + en_EN = "Save the value series of the variable in a file of the '/tmp' directory named 'value...txt' from the variable name and the saving step", order = "next", ) ObserverTemplates.store( @@ -145,7 +145,7 @@ ObserverTemplates.store( name = "ValueSeriePrinterAndSaver", content = """import numpy, re\nv=numpy.array(var[:], ndmin=1)\nprint info,v\nglobal istep\ntry:\n istep += 1\nexcept:\n istep = 0\nf='/tmp/value_%s_%05i.txt'%(info,istep)\nf=re.sub('\\s','_',f)\nprint 'Value saved in \"%s\"'%f\nnumpy.savetxt(f,v)""", fr_FR = "Imprime sur la sortie standard et, en même temps, enregistre dans un fichier la série des valeurs de la variable", - en_EN = "Print on standard output and, in the same time, save in a file the value serie of the variable", + en_EN = "Print on standard output and, in the same time, save in a file the value series of the variable", order = "next", ) ObserverTemplates.store( @@ -159,7 +159,7 @@ ObserverTemplates.store( name = "ValueSerieGnuPlotter", content = """import numpy, Gnuplot\nv=numpy.array(var[:], ndmin=1)\nglobal ifig, gp\ntry:\n ifig += 1\n gp(' set style data lines')\nexcept:\n ifig = 0\n gp = Gnuplot.Gnuplot(persist=1)\n gp(' set style data lines')\ngp('set title \"%s (Figure %i)\"'%(info,ifig))\ngp.plot( Gnuplot.Data( v, with_='lines lw 2' ) )""", fr_FR = "Affiche graphiquement avec Gnuplot la série des valeurs de la variable", - en_EN = "Graphically plot 
with Gnuplot the value serie of the variable", + en_EN = "Graphically plot with Gnuplot the value series of the variable", order = "next", ) ObserverTemplates.store( @@ -173,7 +173,7 @@ ObserverTemplates.store( name = "ValueSeriePrinterAndGnuPlotter", content = """print info, var[:] \nimport numpy, Gnuplot\nv=numpy.array(var[:], ndmin=1)\nglobal ifig,gp\ntry:\n ifig += 1\n gp(' set style data lines')\nexcept:\n ifig = 0\n gp = Gnuplot.Gnuplot(persist=1)\n gp(' set style data lines')\ngp('set title \"%s (Figure %i)\"'%(info,ifig))\ngp.plot( Gnuplot.Data( v, with_='lines lw 2' ) )""", fr_FR = "Imprime sur la sortie standard et, en même temps, affiche graphiquement avec Gnuplot la série des valeurs de la variable", - en_EN = "Print on standard output and, in the same time, graphically plot with Gnuplot the value serie of the variable", + en_EN = "Print on standard output and, in the same time, graphically plot with Gnuplot the value series of the variable", order = "next", ) ObserverTemplates.store( @@ -187,7 +187,7 @@ ObserverTemplates.store( name = "ValueSeriePrinterSaverAndGnuPlotter", content = """print info, var[:] \nimport numpy, re\nv=numpy.array(var[:], ndmin=1)\nglobal istep\ntry:\n istep += 1\nexcept:\n istep = 0\nf='/tmp/value_%s_%05i.txt'%(info,istep)\nf=re.sub('\\s','_',f)\nprint 'Value saved in \"%s\"'%f\nnumpy.savetxt(f,v)\nimport Gnuplot\nglobal ifig,gp\ntry:\n ifig += 1\n gp(' set style data lines')\nexcept:\n ifig = 0\n gp = Gnuplot.Gnuplot(persist=1)\n gp(' set style data lines')\ngp('set title \"%s (Figure %i)\"'%(info,ifig))\ngp.plot( Gnuplot.Data( v, with_='lines lw 2' ) )""", fr_FR = "Imprime sur la sortie standard et, en même temps, enregistre dans un fichier et affiche graphiquement la série des valeurs de la variable", - en_EN = "Print on standard output and, in the same, time save in a file and graphically plot the value serie of the variable", + en_EN = "Print on standard output and, in the same, time save in a file and graphically plot the value series of the variable", order = "next", ) ObserverTemplates.store( diff --git a/src/daSalome/daYacsIntegration/daOptimizerLoop.py b/src/daSalome/daYacsIntegration/daOptimizerLoop.py index 04c2f7f..362eb9e 100644 --- a/src/daSalome/daYacsIntegration/daOptimizerLoop.py +++ b/src/daSalome/daYacsIntegration/daOptimizerLoop.py @@ -343,7 +343,7 @@ class AssimilationAlgorithm_asynch(SALOMERuntime.OptimizerAlgASync): self.ADD.setDataObserver(observer_name, HookFunction=self.obs, HookParameters = observer_name) # Start Assimilation Study - print "Launching the analyse\n" + print "Launching the analysis\n" try: self.ADD.analyze() except Exception as e: -- 2.39.2
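
As a complementary illustration (not part of the patch itself): the corrected "ref_operator_requirements.rst" page above states that the simulation wrapper must be named "*DirectOperator*" and must accept its ``X`` argument given either as a Python list, a Numpy array or a Numpy 1D-matrix. A minimal sketch satisfying that requirement, with a purely hypothetical internal model standing in for the user's simulation, could look like::

    import numpy

    def DirectOperator( X ):
        # The name "DirectOperator" is mandatory. "X" may arrive as a Python
        # list, a Numpy array or a Numpy 1D-matrix, so it is first flattened
        # into a plain 1D array before being used.
        x = numpy.ravel( numpy.array( X, dtype=float ) )
        # Hypothetical 3-component model, for illustration only: replace it
        # by the call to the real simulation code.
        return numpy.array( [ x[0], 2.*x[1], 3.*x[2] ] )

Such a wrapper can then be checked with the "*FunctionTest*" checking algorithm mentioned in the same documentation page.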