# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2022 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D

__doc__ = """
    Non Linear Least Squares
"""
__author__ = "Jean-Philippe ARGAUD"

import numpy, scipy, scipy.optimize, scipy.version

# ==============================================================================
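# The function below performs a non-linear least squares estimation: with H the
# (possibly non-linear) observation operator HO["Direct"] and R the observation
# error covariance, it searches for a state x minimizing the observation-only
# quadratic cost implemented in CostFunction below,
#
#     J(x) = Jb + Jo = 0 + 0.5 * (Y - H(x))^T * R^{-1} * (Y - H(x)).
#
# There is no background term (Jb = 0); the background Xb only enters through
# diagnostics such as OMB, BMA or SimulatedObservationAtBackground, and the
# starting point of the minimization is selfA._parameters["InitializationPoint"].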
def ecwnlls(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False):
    """
    Correction
    """
    #
    # Initializations
    # ---------------
    Hm = HO["Direct"].appliedTo
    Ha = HO["Adjoint"].appliedInXTo
    #
    if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
        HXb = numpy.asarray(Hm( Xb, HO["AppliedInX"]["HXb"] ))
    else:
        HXb = numpy.asarray(Hm( Xb ))
    HXb = HXb.reshape((-1,1))
    if Y.size != HXb.size:
        raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
    if max(Y.shape) != max(HXb.shape):
        raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
    #
    RI = R.getI()
    if selfA._parameters["Minimizer"] == "LM":
        RdemiI = R.choleskyI()
    #
    Xini = selfA._parameters["InitializationPoint"]
    #
    # Definition of the cost function
    # -------------------------------
    def CostFunction(x):
        _X  = numpy.asarray(x).reshape((-1,1))
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState") or \
            selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentState"].store( _X )
        _HX = numpy.asarray(Hm( _X )).reshape((-1,1))
        _Innovation = Y - _HX
        if selfA._toStore("SimulatedObservationAtCurrentState") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
        if selfA._toStore("InnovationAtCurrentState"):
            selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
        #
        Jb  = 0.
        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
        J   = Jb + Jo
        #
        selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        if selfA._toStore("IndexOfOptimum") or \
            selfA._toStore("CurrentOptimum") or \
            selfA._toStore("CostFunctionJAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
            selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
            selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
        if selfA._toStore("IndexOfOptimum"):
            selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
        if selfA._toStore("CurrentOptimum"):
            selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
        if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
        if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
        if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
        if selfA._toStore("CostFunctionJAtCurrentOptimum"):
            selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
        return J
    #
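    # Gradient of the cost above: since Jb = 0, only the observation term
    # contributes, so that, writing H' for the tangent linear of H at _X,
    #     grad J(x) = - H'(x)^T * R^{-1} * (Y - H(x)),
    # which is what the adjoint call Ha( (_X, RI * (Y - _HX)) ) below is
    # expected to evaluate.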
    def GradientOfCostFunction(x):
        _X      = numpy.asarray(x).reshape((-1,1))
        _HX     = numpy.asarray(Hm( _X )).reshape((-1,1))
        GradJb  = 0.
        GradJo  = - Ha( (_X, RI * (Y - _HX)) )
        GradJ   = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
        return GradJ
    #
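    # Residual formulation used by the Levenberg-Marquardt minimizer
    # (scipy.optimize.leastsq), which works on a residual vector rather than on
    # a scalar cost: with RdemiI (from R.choleskyI()) expected to act as an
    # inverse square root of R, the residual
    #     r(x) = R^{-1/2} * (Y - H(x))    gives    J(x) = 0.5 * ||r(x)||^2,
    # and GradientOfCostFunctionLM returns the corresponding Jacobian
    # - R^{-1/2} * H'(x), built from the tangent operator HO["Tangent"].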
    def CostFunctionLM(x):
        _X  = numpy.ravel( x ).reshape((-1,1))
        _HX = Hm( _X ).reshape((-1,1))
        _Innovation = Y - _HX
        Jb  = 0.
        Jo  = float( 0.5 * _Innovation.T * (RI * _Innovation) )
        J   = Jb + Jo
        if selfA._parameters["StoreInternalVariables"] or \
            selfA._toStore("CurrentState"):
            selfA.StoredVariables["CurrentState"].store( _X )
        selfA.StoredVariables["CostFunctionJb"].store( Jb )
        selfA.StoredVariables["CostFunctionJo"].store( Jo )
        selfA.StoredVariables["CostFunctionJ" ].store( J )
        #
        return numpy.ravel( RdemiI*_Innovation )
    #
    def GradientOfCostFunctionLM(x):
        _X      = x.reshape((-1,1))
        return - RdemiI*HO["Tangent"].asMatrix( _X )
    #
    # Minimization of the functional
    # ------------------------------
    nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
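    # nbPreviousSteps records how many cost values were already stored before
    # this run, so that IndexMin (computed after the minimization) only ranges
    # over the evaluations of the current run; CostFunction above reads it
    # through the closure and is only called after this assignment.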
    #
    if selfA._parameters["Minimizer"] == "LBFGSB":
        # Compare scipy versions as (major, minor) integer tuples rather than
        # as strings, to avoid lexicographic surprises such as "1.10" < "1.4.99".
        __sv = tuple(map(int, scipy.version.version.split(".")[:2]))
        if (0, 19) <= __sv <= (1, 4):
            import daAlgorithms.Atoms.lbfgsb14hlt as optimiseur
        elif (1, 5) <= __sv <= (1, 7):
            import daAlgorithms.Atoms.lbfgsb17hlt as optimiseur
        elif __sv == (1, 8):
            import daAlgorithms.Atoms.lbfgsb18hlt as optimiseur
        elif __sv == (1, 9):
            import daAlgorithms.Atoms.lbfgsb19hlt as optimiseur
        else:
            import scipy.optimize as optimiseur
        Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfIterations"]-1,
            factr       = selfA._parameters["CostDecrementTolerance"]*1.e14,
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            iprint      = selfA._parameters["optiprint"],
            )
        # nfeval = Informations['funcalls']
        # rc     = Informations['warnflag']
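        # About the factr scaling above: fmin_l_bfgs_b stops when the relative
        # cost decrease falls below factr times the machine precision (about
        # 2.2e-16), so the 1.e14 factor presumably rescales the user-level
        # CostDecrementTolerance into the factr units expected by the optimizer.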
    elif selfA._parameters["Minimizer"] == "TNC":
        Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
            func        = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            bounds      = selfA._parameters["Bounds"],
            maxfun      = selfA._parameters["MaximumNumberOfIterations"],
            pgtol       = selfA._parameters["ProjectedGradientTolerance"],
            ftol        = selfA._parameters["CostDecrementTolerance"],
            messages    = selfA._parameters["optmessages"],
            )
    elif selfA._parameters["Minimizer"] == "CG":
        Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfIterations"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "NCG":
        Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfIterations"],
            avextol     = selfA._parameters["CostDecrementTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "BFGS":
        Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
            f           = CostFunction,
            x0          = Xini,
            fprime      = GradientOfCostFunction,
            args        = (),
            maxiter     = selfA._parameters["MaximumNumberOfIterations"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            disp        = selfA._parameters["optdisp"],
            full_output = True,
            )
    elif selfA._parameters["Minimizer"] == "LM":
        Minimum, cov_x, infodict, mesg, rc = scipy.optimize.leastsq(
            func        = CostFunctionLM,
            x0          = Xini,
            Dfun        = GradientOfCostFunctionLM,
            args        = (),
            ftol        = selfA._parameters["CostDecrementTolerance"],
            maxfev      = selfA._parameters["MaximumNumberOfIterations"],
            gtol        = selfA._parameters["GradientNormTolerance"],
            full_output = True,
            )
        # nfeval = infodict['nfev']
    else:
        raise ValueError("Error in minimizer name: %s is unknown"%selfA._parameters["Minimizer"])
    #
    IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
    #
    # Workaround for a TNC bug affecting the returned Minimum
    # --------------------------------------------------------
    if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
        Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
    #
    Xa = Minimum
    if __storeState: selfA._setInternalState("Xn", Xa)
    #--------------------------
    #
    selfA.StoredVariables["Analysis"].store( Xa )
    #
    if selfA._toStore("OMA") or \
        selfA._toStore("InnovationAtCurrentAnalysis") or \
        selfA._toStore("SimulatedObservationAtOptimum"):
        if selfA._toStore("SimulatedObservationAtCurrentState"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
        elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
            HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]
        else:
            HXa = Hm( Xa )
        oma = Y - HXa.reshape((-1,1))
    #
    # Additional computations and/or storage
    # --------------------------------------
    if selfA._toStore("Innovation") or \
        selfA._toStore("OMB"):
        Innovation  = Y - HXb
    if selfA._toStore("Innovation"):
        selfA.StoredVariables["Innovation"].store( Innovation )
    if selfA._toStore("BMA"):
        selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(Xa) )
    if selfA._toStore("OMA"):
        selfA.StoredVariables["OMA"].store( oma )
    if selfA._toStore("InnovationAtCurrentAnalysis"):
        selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( oma )
    if selfA._toStore("OMB"):
        selfA.StoredVariables["OMB"].store( Innovation )
    if selfA._toStore("SimulatedObservationAtBackground"):
        selfA.StoredVariables["SimulatedObservationAtBackground"].store( HXb )
    if selfA._toStore("SimulatedObservationAtOptimum"):
        selfA.StoredVariables["SimulatedObservationAtOptimum"].store( HXa )
    #
    return 0

# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')