+ #
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("APosterioriCovariance") \
+ or selfA._toStore("InnovationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentAnalysis") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
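+ # _HXa is the analysis mapped into observation space, and _Innovation
+ # the observation-minus-analysis misfit Ynpu - H(Xa)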
+ _HXa = numpy.asmatrix(numpy.ravel( H((Xa, Un)) )).T
+ _Innovation = Ynpu - _HXa
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ # ---> with analysis
+ selfA.StoredVariables["Analysis"].store( Xa )
+ if selfA._toStore("SimulatedObservationAtCurrentAnalysis"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"].store( _HXa )
+ if selfA._toStore("InnovationAtCurrentAnalysis"):
+ selfA.StoredVariables["InnovationAtCurrentAnalysis"].store( _Innovation )
+ # ---> with current state
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CurrentState"):
+ selfA.StoredVariables["CurrentState"].store( Xn )
+ if selfA._toStore("ForecastState"):
+ selfA.StoredVariables["ForecastState"].store( EMX )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( EMX - Xa )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( Ynpu - HX_predicted )
+ if selfA._toStore("SimulatedObservationAtCurrentState") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( HX_predicted )
+ # ---> others
+ if selfA._parameters["StoreInternalVariables"] \
+ or selfA._toStore("CostFunctionJ") \
+ or selfA._toStore("CostFunctionJb") \
+ or selfA._toStore("CostFunctionJo") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("APosterioriCovariance"):
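+ # Background term Jb = 0.5*(Xa-Xb)'*B^{-1}*(Xa-Xb) and observation
+ # term Jo = 0.5*d'*R^{-1}*d, with d the innovation computed above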
+ Jb = float( 0.5 * (Xa - Xb).T * BI * (Xa - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ #
+ if selfA._toStore("IndexOfOptimum") \
+ or selfA._toStore("CurrentOptimum") \
+ or selfA._toStore("CostFunctionJAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJbAtCurrentOptimum") \
+ or selfA._toStore("CostFunctionJoAtCurrentOptimum") \
+ or selfA._toStore("SimulatedObservationAtCurrentOptimum"):
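+ # argmin over the cost values stored during this run only: the
+ # nbPreviousSteps offset skips values stored by earlier runs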
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["Analysis"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentAnalysis"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ if selfA._toStore("APosterioriCovariance"):
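+ # A posteriori covariance estimated as the empirical error
+ # covariance of the analysis ensemble Xn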
+ selfA.StoredVariables["APosterioriCovariance"].store( EnsembleErrorCovariance(Xn) )
+ if selfA._parameters["EstimationOf"] == "Parameters" \
+ and J < previousJMinimum:
+ previousJMinimum = J
+ XaMin = Xa
+ if selfA._toStore("APosterioriCovariance"):
+ covarianceXaMin = Pn
+ #
+ # Final additional storage of the optimum in parameter estimation
+ # ----------------------------------------------------------------
+ if selfA._parameters["EstimationOf"] == "Parameters":
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["Analysis"]) )
+ selfA.StoredVariables["Analysis"].store( XaMin )
+ if selfA._toStore("APosterioriCovariance"):
+ selfA.StoredVariables["APosterioriCovariance"].store( covarianceXaMin )
+ if selfA._toStore("BMA"):
+ selfA.StoredVariables["BMA"].store( numpy.ravel(Xb) - numpy.ravel(XaMin) )
+ #
+ return 0
+
+# ==============================================================================
+def std3dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q):
+ """
+ 3DVAR (three-dimensional variational data assimilation)
+ """
+ #
+ # Initializations
+ # ---------------
+ #
+ # Operators
+ Hm = HO["Direct"].appliedTo
+ Ha = HO["Adjoint"].appliedInXTo
+ #
+ # Optional use of a precomputed H(Xb) vector
+ if HO["AppliedInX"] is not None and "HXb" in HO["AppliedInX"]:
+ HXb = Hm( Xb, HO["AppliedInX"]["HXb"] )
+ else:
+ HXb = Hm( Xb )
+ HXb = numpy.asmatrix(numpy.ravel( HXb )).T
+ if Y.size != HXb.size:
+ raise ValueError("The size %i of observations Y and %i of observed calculation H(X) are different; they have to be identical."%(Y.size,HXb.size))
+ if max(Y.shape) != max(HXb.shape):
+ raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different; they have to be identical."%(Y.shape,HXb.shape))
+ #
+ if selfA._toStore("JacobianMatrixAtBackground"):
+ HtMb = HO["Tangent"].asMatrix(ValueForMethodForm = Xb)
+ HtMb = HtMb.reshape(Y.size,Xb.size) # ADAO & check shape
+ selfA.StoredVariables["JacobianMatrixAtBackground"].store( HtMb )
+ #
+ # Precompute the inverses of B and R
+ BI = B.getI()
+ RI = R.getI()
+ #
+ # Starting point of the optimization
+ Xini = selfA._parameters["InitializationPoint"]
+ #
+ # Definition of the cost function
+ # -------------------------------
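+ # J(x) = 0.5*(x-Xb)'*B^{-1}*(x-Xb) + 0.5*(Y-H(x))'*R^{-1}*(Y-H(x))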
+ def CostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ if selfA._parameters["StoreInternalVariables"] or \
+ selfA._toStore("CurrentState") or \
+ selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentState"].store( _X )
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ _Innovation = Y - _HX
+ if selfA._toStore("SimulatedObservationAtCurrentState") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentState"].store( _HX )
+ if selfA._toStore("InnovationAtCurrentState"):
+ selfA.StoredVariables["InnovationAtCurrentState"].store( _Innovation )
+ #
+ Jb = float( 0.5 * (_X - Xb).T * BI * (_X - Xb) )
+ Jo = float( 0.5 * _Innovation.T * RI * _Innovation )
+ J = Jb + Jo
+ #
+ selfA.StoredVariables["CurrentIterationNumber"].store( len(selfA.StoredVariables["CostFunctionJ"]) )
+ selfA.StoredVariables["CostFunctionJb"].store( Jb )
+ selfA.StoredVariables["CostFunctionJo"].store( Jo )
+ selfA.StoredVariables["CostFunctionJ" ].store( J )
+ if selfA._toStore("IndexOfOptimum") or \
+ selfA._toStore("CurrentOptimum") or \
+ selfA._toStore("CostFunctionJAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJbAtCurrentOptimum") or \
+ selfA._toStore("CostFunctionJoAtCurrentOptimum") or \
+ selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ if selfA._toStore("IndexOfOptimum"):
+ selfA.StoredVariables["IndexOfOptimum"].store( IndexMin )
+ if selfA._toStore("CurrentOptimum"):
+ selfA.StoredVariables["CurrentOptimum"].store( selfA.StoredVariables["CurrentState"][IndexMin] )
+ if selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"].store( selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin] )
+ if selfA._toStore("CostFunctionJbAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJbAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJb"][IndexMin] )
+ if selfA._toStore("CostFunctionJoAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJoAtCurrentOptimum"].store( selfA.StoredVariables["CostFunctionJo"][IndexMin] )
+ if selfA._toStore("CostFunctionJAtCurrentOptimum"):
+ selfA.StoredVariables["CostFunctionJAtCurrentOptimum" ].store( selfA.StoredVariables["CostFunctionJ" ][IndexMin] )
+ return J
+ #
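+ # grad J(x) = B^{-1}*(x-Xb) - H'*R^{-1}*(Y-H(x)), the adjoint H'
+ # being applied through Ha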
+ def GradientOfCostFunction(x):
+ _X = numpy.asmatrix(numpy.ravel( x )).T
+ _HX = Hm( _X )
+ _HX = numpy.asmatrix(numpy.ravel( _HX )).T
+ GradJb = BI * (_X - Xb)
+ GradJo = - Ha( (_X, RI * (Y - _HX)) )
+ GradJ = numpy.ravel( GradJb ) + numpy.ravel( GradJo )
+ return GradJ
+ #
+ # Minimization of the functional
+ # ------------------------------
+ nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber()
+ #
+ if selfA._parameters["Minimizer"] == "LBFGSB":
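+ # For scipy versions in this range, a local L-BFGS-B variant
+ # (lbfgsbhlt) is used in place of scipy.optimize; note that the
+ # version strings are compared lexicographically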
+ if "0.19" <= scipy.version.version <= "1.1.0":
+ import lbfgsbhlt as optimiseur
+ else:
+ import scipy.optimize as optimiseur
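+ # NB: fmin_l_bfgs_b stops when the relative cost decrease falls
+ # below factr times the machine epsilon, hence the 1.e14 scaling
+ # applied to CostDecrementTolerance below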
+ Minimum, J_optimal, Informations = optimiseur.fmin_l_bfgs_b(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"]-1,
+ factr = selfA._parameters["CostDecrementTolerance"]*1.e14,
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ iprint = selfA._parameters["optiprint"],
+ )
+ nfeval = Informations['funcalls']
+ rc = Informations['warnflag']
+ elif selfA._parameters["Minimizer"] == "TNC":
+ Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
+ func = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ bounds = selfA._parameters["Bounds"],
+ maxfun = selfA._parameters["MaximumNumberOfSteps"],
+ pgtol = selfA._parameters["ProjectedGradientTolerance"],
+ ftol = selfA._parameters["CostDecrementTolerance"],
+ messages = selfA._parameters["optmessages"],
+ )
+ elif selfA._parameters["Minimizer"] == "CG":
+ Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "NCG":
+ Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ avextol = selfA._parameters["CostDecrementTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ elif selfA._parameters["Minimizer"] == "BFGS":
+ Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
+ f = CostFunction,
+ x0 = Xini,
+ fprime = GradientOfCostFunction,
+ args = (),
+ maxiter = selfA._parameters["MaximumNumberOfSteps"],
+ gtol = selfA._parameters["GradientNormTolerance"],
+ disp = selfA._parameters["optdisp"],
+ full_output = True,
+ )
+ else:
+ raise ValueError("Unknown minimizer: %s"%selfA._parameters["Minimizer"])
+ #
+ IndexMin = numpy.argmin( selfA.StoredVariables["CostFunctionJ"][nbPreviousSteps:] ) + nbPreviousSteps
+ MinJ = selfA.StoredVariables["CostFunctionJ"][IndexMin]
+ #
+ # Workaround for a TNC bug affecting the returned Minimum
+ # --------------------------------------------------------
+ if selfA._parameters["StoreInternalVariables"] or selfA._toStore("CurrentState"):
+ Minimum = selfA.StoredVariables["CurrentState"][IndexMin]
+ #
+ # Obtaining the analysis
+ # ----------------------
+ Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
+ #
+ selfA.StoredVariables["Analysis"].store( Xa )
+ #
+ if selfA._toStore("OMA") or \
+ selfA._toStore("SigmaObs2") or \
+ selfA._toStore("SimulationQuantiles") or \
+ selfA._toStore("SimulatedObservationAtOptimum"):
+ if selfA._toStore("SimulatedObservationAtCurrentState"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentState"][IndexMin]
+ elif selfA._toStore("SimulatedObservationAtCurrentOptimum"):
+ HXa = selfA.StoredVariables["SimulatedObservationAtCurrentOptimum"][-1]