X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2FdaComposant%2FdaAlgorithms%2F3DVAR.py;h=e343c6450748211d2460c8cd06a52eb1878bfee6;hb=01022e867e0565b15e57ec3643f9fc8b25346402;hp=cee5c22b553ee25c9aec84af73b8993b8e873fe5;hpb=cee976c01b92436ef9531779b2c76f3c20f57549;p=modules%2Fadao.git

diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py
index cee5c22..e343c64 100644
--- a/src/daComposant/daAlgorithms/3DVAR.py
+++ b/src/daComposant/daAlgorithms/3DVAR.py
@@ -1,6 +1,6 @@
 #-*-coding:iso-8859-1-*-
 #
-# Copyright (C) 2008-2011 EDF R&D
+# Copyright (C) 2008-2012 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -18,6 +18,7 @@
 #
 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
 #
+# Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D

 import logging
 from daCore import BasicObjects, PlatformInfo
@@ -26,7 +27,7 @@ m = PlatformInfo.SystemUsage()
 import numpy
 import scipy.optimize

-if logging.getLogger().level < 30:
+if logging.getLogger().level < logging.WARNING:
     iprint = 1
     message = scipy.optimize.tnc.MSG_ALL
     disp = 1
@@ -38,31 +39,84 @@ else:
 # ==============================================================================
 class ElementaryAlgorithm(BasicObjects.Algorithm):
     def __init__(self):
-        BasicObjects.Algorithm.__init__(self)
-        self._name = "3DVAR"
-        logging.debug("%s Initialisation"%self._name)
+        BasicObjects.Algorithm.__init__(self, "3DVAR")
+        self.defineRequiredParameter(
+            name = "Minimizer",
+            default = "LBFGSB",
+            typecast = str,
+            message = "Minimiseur utilisé",
+            listval = ["LBFGSB","TNC", "CG", "NCG", "BFGS"],
+            )
+        self.defineRequiredParameter(
+            name = "MaximumNumberOfSteps",
+            default = 15000,
+            typecast = int,
+            message = "Nombre maximal de pas d'optimisation",
+            minval = -1,
+            )
+        self.defineRequiredParameter(
+            name = "CostDecrementTolerance",
+            default = 1.e-7,
+            typecast = float,
+            message = "Diminution relative minimale du cout lors de l'arrêt",
+            )
+        self.defineRequiredParameter(
+            name = "ProjectedGradientTolerance",
+            default = -1,
+            typecast = float,
+            message = "Maximum des composantes du gradient projeté lors de l'arrêt",
+            minval = -1,
+            )
+        self.defineRequiredParameter(
+            name = "GradientNormTolerance",
+            default = 1.e-05,
+            typecast = float,
+            message = "Maximum des composantes du gradient lors de l'arrêt",
+            )
+        self.defineRequiredParameter(
+            name = "StoreInternalVariables",
+            default = False,
+            typecast = bool,
+            message = "Stockage des variables internes ou intermédiaires du calcul",
+            )
+        self.defineRequiredParameter(
+            name = "StoreSupplementaryCalculations",
+            default = [],
+            typecast = tuple,
+            message = "Liste de calculs supplémentaires à stocker et/ou effectuer",
+            listval = ["APosterioriCovariance", "BMA", "OMA", "OMB", "Innovation", "SigmaObs2", "MahalanobisConsistency"]
+            )

     def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
-        """
-        Calcul de l'estimateur 3D-VAR
-        """
         logging.debug("%s Lancement"%self._name)
-        logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo")))
+        logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M")))
+        #
+        # Paramètres de pilotage
+        # ----------------------
+        self.setParameters(Parameters)
+        #
+        if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0):
+            Bounds = self._parameters["Bounds"]
+            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
+        else:
+            Bounds = None
+        #
+        # Correction pour pallier a un bug de TNC sur le retour du Minimum
+        if self._parameters["Minimizer"] == "TNC":
+            self.setParameterValue("StoreInternalVariables",True)
         #
         # Opérateur d'observation
         # -----------------------
         Hm = H["Direct"].appliedTo
-        Ht = H["Adjoint"].appliedInXTo
+        Ha = H["Adjoint"].appliedInXTo
         #
         # Utilisation éventuelle d'un vecteur H(Xb) précalculé
         # ----------------------------------------------------
         if H["AppliedToX"] is not None and H["AppliedToX"].has_key("HXb"):
-            logging.debug("%s Utilisation de HXb"%self._name)
             HXb = H["AppliedToX"]["HXb"]
         else:
-            logging.debug("%s Calcul de Hm(Xb)"%self._name)
             HXb = Hm( Xb )
-        HXb = numpy.asmatrix(HXb).flatten().T
+        HXb = numpy.asmatrix(numpy.ravel( HXb )).T
         #
         # Calcul de l'innovation
         # ----------------------
@@ -71,50 +125,46 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
         if max(Y.shape) != max(HXb.shape):
             raise ValueError("The shapes %s of observations Y and %s of observed calculation H(X) are different, they have to be identical."%(Y.shape,HXb.shape))
         d = Y - HXb
-        logging.debug("%s Innovation d = %s"%(self._name, d))
         #
         # Précalcul des inversions de B et R
         # ----------------------------------
         if B is not None:
             BI = B.I
-        elif Parameters["B_scalar"] is not None:
-            BI = 1.0 / Parameters["B_scalar"]
+        elif self._parameters["B_scalar"] is not None:
+            BI = 1.0 / self._parameters["B_scalar"]
+        else:
+            raise ValueError("Background error covariance matrix has to be properly defined!")
         #
         if R is not None:
             RI = R.I
-        elif Parameters["R_scalar"] is not None:
-            RI = 1.0 / Parameters["R_scalar"]
+        elif self._parameters["R_scalar"] is not None:
+            RI = 1.0 / self._parameters["R_scalar"]
+        else:
+            raise ValueError("Observation error covariance matrix has to be properly defined!")
         #
         # Définition de la fonction-coût
         # ------------------------------
         def CostFunction(x):
-            _X = numpy.asmatrix(x).flatten().T
-            logging.debug("%s CostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten()))
+            _X = numpy.asmatrix(numpy.ravel( x )).T
             _HX = Hm( _X )
-            _HX = numpy.asmatrix(_HX).flatten().T
+            _HX = numpy.asmatrix(numpy.ravel( _HX )).T
             Jb = 0.5 * (_X - Xb).T * BI * (_X - Xb)
             Jo = 0.5 * (Y - _HX).T * RI * (Y - _HX)
             J = float( Jb ) + float( Jo )
-            logging.debug("%s CostFunction Jb = %s"%(self._name, Jb))
-            logging.debug("%s CostFunction Jo = %s"%(self._name, Jo))
-            logging.debug("%s CostFunction J = %s"%(self._name, J))
-            self.StoredVariables["CurrentState"].store( _X.A1 )
+            if self._parameters["StoreInternalVariables"]:
+                self.StoredVariables["CurrentState"].store( _X.A1 )
             self.StoredVariables["CostFunctionJb"].store( Jb )
             self.StoredVariables["CostFunctionJo"].store( Jo )
             self.StoredVariables["CostFunctionJ" ].store( J )
-            return float( J )
+            return J
         #
         def GradientOfCostFunction(x):
-            _X = numpy.asmatrix(x).flatten().T
-            logging.debug("%s GradientOfCostFunction X = %s"%(self._name, numpy.asmatrix( _X ).flatten()))
+            _X = numpy.asmatrix(numpy.ravel( x )).T
             _HX = Hm( _X )
-            _HX = numpy.asmatrix(_HX).flatten().T
+            _HX = numpy.asmatrix(numpy.ravel( _HX )).T
             GradJb = BI * (_X - Xb)
-            GradJo = - Ht( (_X, RI * (Y - _HX)) )
-            GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T
-            logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten()))
-            logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten()))
-            logging.debug("%s GradientOfCostFunction GradJ = %s"%(self._name, numpy.asmatrix( GradJ ).flatten()))
+            GradJo = - Ha( (_X, RI * (Y - _HX)) )
+            GradJ = numpy.asmatrix( numpy.ravel( GradJb ) + numpy.ravel( GradJo ) ).T
             return GradJ.A1
         #
         # Point de démarrage de l'optimisation : Xini = Xb
@@ -123,136 +173,125 @@ class ElementaryAlgorithm(BasicObjects.Algorithm):
             Xini = Xb.A1.tolist()
         else:
             Xini = list(Xb)
-        logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini))
-        #
-        # Paramètres de pilotage
-        # ----------------------
-        # Potentiels : "Bounds", "Minimizer", "MaximumNumberOfSteps", "ProjectedGradientTolerance", "GradientNormTolerance", "InnerMinimizer"
-        if Parameters.has_key("Bounds") and (type(Parameters["Bounds"]) is type([]) or type(Parameters["Bounds"]) is type(())) and (len(Parameters["Bounds"]) > 0):
-            Bounds = Parameters["Bounds"]
-        else:
-            Bounds = None
-        MinimizerList = ["LBFGSB","TNC", "CG", "NCG", "BFGS"]
-        if Parameters.has_key("Minimizer") and (Parameters["Minimizer"] in MinimizerList):
-            Minimizer = str( Parameters["Minimizer"] )
-        else:
-            Minimizer = "LBFGSB"
-            logging.warning("%s Unknown or undefined minimizer, replaced by the default one \"%s\""%(self._name,Minimizer))
-        logging.debug("%s Minimiseur utilisé = %s"%(self._name, Minimizer))
-        if Parameters.has_key("MaximumNumberOfSteps") and (Parameters["MaximumNumberOfSteps"] > -1):
-            maxiter = int( Parameters["MaximumNumberOfSteps"] )
-        else:
-            maxiter = 15000
-        logging.debug("%s Nombre maximal de pas d'optimisation = %s"%(self._name, str(maxiter)))
-        if Parameters.has_key("CostDecrementTolerance") and (Parameters["CostDecrementTolerance"] > 0):
-            ftol = float(Parameters["CostDecrementTolerance"])
-            factr = ftol * 1.e14
-        else:
-            ftol = 1.e-7
-            factr = ftol * 1.e14
-        logging.debug("%s Diminution relative minimale du cout lors de l'arret = %s"%(self._name, str(1./factr)))
-        if Parameters.has_key("ProjectedGradientTolerance") and (Parameters["ProjectedGradientTolerance"] > -1):
-            pgtol = float(Parameters["ProjectedGradientTolerance"])
-        else:
-            pgtol = -1
-        logging.debug("%s Maximum des composantes du gradient projete lors de l'arret = %s"%(self._name, str(pgtol)))
-        if Parameters.has_key("GradientNormTolerance") and (Parameters["GradientNormTolerance"] > -1):
-            gtol = float(Parameters["GradientNormTolerance"])
-        else:
-            gtol = 1.e-05
-        logging.debug("%s Maximum des composantes du gradient lors de l'arret = %s"%(self._name, str(gtol)))
-        InnerMinimizerList = ["CG", "NCG", "BFGS"]
-        if Parameters.has_key("InnerMinimizer") and (Parameters["InnerMinimizer"] in InnerMinimizerList):
-            InnerMinimizer = str( Parameters["InnerMinimizer"] )
-        else:
-            InnerMinimizer = "BFGS"
-        logging.debug("%s Minimiseur interne utilisé = %s"%(self._name, InnerMinimizer))
         #
         # Minimisation de la fonctionnelle
         # --------------------------------
-        if Minimizer == "LBFGSB":
+        if self._parameters["Minimizer"] == "LBFGSB":
             Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b(
                 func = CostFunction,
                 x0 = Xini,
                 fprime = GradientOfCostFunction,
                 args = (),
                 bounds = Bounds,
-                maxfun = maxiter-1,
-                factr = factr,
-                pgtol = pgtol,
+                maxfun = self._parameters["MaximumNumberOfSteps"]-1,
+                factr = self._parameters["CostDecrementTolerance"]*1.e14,
+                pgtol = self._parameters["ProjectedGradientTolerance"],
                 iprint = iprint,
                 )
             nfeval = Informations['funcalls']
             rc = Informations['warnflag']
-        elif Minimizer == "TNC":
+        elif self._parameters["Minimizer"] == "TNC":
             Minimum, nfeval, rc = scipy.optimize.fmin_tnc(
                 func = CostFunction,
                 x0 = Xini,
                 fprime = GradientOfCostFunction,
                 args = (),
                 bounds = Bounds,
-                maxfun = maxiter,
-                pgtol = pgtol,
-                ftol = ftol,
+                maxfun = self._parameters["MaximumNumberOfSteps"],
+                pgtol = self._parameters["ProjectedGradientTolerance"],
+                ftol = self._parameters["CostDecrementTolerance"],
                 messages = message,
                 )
-        elif Minimizer == "CG":
+        elif self._parameters["Minimizer"] == "CG":
             Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg(
                 f = CostFunction,
                 x0 = Xini,
                 fprime = GradientOfCostFunction,
                 args = (),
-                maxiter = maxiter,
-                gtol = gtol,
+                maxiter = self._parameters["MaximumNumberOfSteps"],
+                gtol = self._parameters["GradientNormTolerance"],
                 disp = disp,
                 full_output = True,
                 )
-        elif Minimizer == "NCG":
+        elif self._parameters["Minimizer"] == "NCG":
             Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg(
                 f = CostFunction,
                 x0 = Xini,
                 fprime = GradientOfCostFunction,
                 args = (),
-                maxiter = maxiter,
-                avextol = ftol,
+                maxiter = self._parameters["MaximumNumberOfSteps"],
+                avextol = self._parameters["CostDecrementTolerance"],
                 disp = disp,
                 full_output = True,
                 )
-        elif Minimizer == "BFGS":
+        elif self._parameters["Minimizer"] == "BFGS":
             Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs(
                 f = CostFunction,
                 x0 = Xini,
                 fprime = GradientOfCostFunction,
                 args = (),
-                maxiter = maxiter,
-                gtol = gtol,
+                maxiter = self._parameters["MaximumNumberOfSteps"],
+                gtol = self._parameters["GradientNormTolerance"],
                 disp = disp,
                 full_output = True,
                 )
         else:
-            raise ValueError("Error in Minimizer name: %s"%Minimizer)
+            raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"])
         #
-        # Correction pour pallier a un bug de TNC sur le retour du Minimum
-        # ----------------------------------------------------------------
         StepMin = numpy.argmin( self.StoredVariables["CostFunctionJ"].valueserie() )
         MinJ = self.StoredVariables["CostFunctionJ"].valueserie(step = StepMin)
-        Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin)
         #
-        logging.debug("%s %s Step of min cost = %s"%(self._name, Minimizer, StepMin))
-        logging.debug("%s %s Minimum cost = %s"%(self._name, Minimizer, MinJ))
-        logging.debug("%s %s Minimum state = %s"%(self._name, Minimizer, Minimum))
-        logging.debug("%s %s Nb of F = %s"%(self._name, Minimizer, nfeval))
-        logging.debug("%s %s RetCode = %s"%(self._name, Minimizer, rc))
+        # Correction pour pallier a un bug de TNC sur le retour du Minimum
+        # ----------------------------------------------------------------
+        if self._parameters["StoreInternalVariables"]:
+            Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin)
         #
-        # Calcul de l'analyse
-        # --------------------
-        Xa = numpy.asmatrix(Minimum).T
-        logging.debug("%s Analyse Xa = %s"%(self._name, Xa))
+        # Obtention de l'analyse
+        # ----------------------
+        Xa = numpy.asmatrix(numpy.ravel( Minimum )).T
         #
         self.StoredVariables["Analysis"].store( Xa.A1 )
-        self.StoredVariables["Innovation"].store( d.A1 )
         #
-        logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("MB")))
+        # Calcul de la covariance d'analyse
+        # ---------------------------------
+        if "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
+            Ht = H["Tangent"].asMatrix(ValueForMethodForm = Xa)
+            Ht = Ht.reshape(-1,len(Xa.A1)) # ADAO
+            HessienneI = []
+            nb = len(Xa.A1)
+            for i in range(nb):
+                _ee = numpy.matrix(numpy.zeros(nb)).T
+                _ee[i] = 1.
+                _HtEE = Ht * _ee
+                _HtEE = numpy.asmatrix(numpy.ravel( _HtEE )).T
+                HessienneI.append( ( BI*_ee + Ha((Xa,RI*_HtEE)) ).A1 )
+            HessienneI = numpy.matrix( HessienneI )
+            A = HessienneI.I
+            if min(A.shape) != max(A.shape):
+                raise ValueError("The 3DVAR a posteriori covariance matrix A is of shape %s, whereas it has to be a square matrix. There is an error in the observation operator."%str(A.shape))
+            if logging.getLogger().level < logging.WARNING: # La verification n'a lieu qu'en debug
+                try:
+                    L = numpy.linalg.cholesky( A )
+                except:
+                    raise ValueError("The 3DVAR a posteriori covariance matrix A is not symmetric positive-definite. Check your B and R a priori covariances.")
+            self.StoredVariables["APosterioriCovariance"].store( A )
+        #
+        # Calculs et/ou stockages supplémentaires
+        # ---------------------------------------
+        if "Innovation" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["Innovation"].store( numpy.ravel(d) )
+        if "BMA" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["BMA"].store( numpy.ravel(Xb - Xa) )
+        if "OMA" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["OMA"].store( numpy.ravel(Y - Hm(Xa)) )
+        if "OMB" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["OMB"].store( numpy.ravel(d) )
+        if "SigmaObs2" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["SigmaObs2"].store( float( (d.T * (Y-Hm(Xa))) / R.trace() ) )
+        if "MahalanobisConsistency" in self._parameters["StoreSupplementaryCalculations"]:
+            self.StoredVariables["MahalanobisConsistency"].store( float( 2.*MinJ/len(d) ) )
+        #
+        logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("M")))
         logging.debug("%s Terminé"%self._name)
         #
         return 0
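
Note: the CostFunction/GradientOfCostFunction pair introduced by this patch implements the classical 3DVAR functional J(x) = 1/2 (x-xb)^T B^-1 (x-xb) + 1/2 (y-H(x))^T R^-1 (y-H(x)), whose gradient is B^-1 (x-xb) - H^T R^-1 (y-H(x)). The short sketch below reproduces that minimisation with scipy.optimize.fmin_l_bfgs_b, the default "LBFGSB" branch of the patch, on made-up data and an assumed linear observation operator H; it is an illustration only and uses none of the ADAO objects (H["Direct"], StoredVariables, setParameters, ...).

# Standalone illustration (not ADAO code): 3DVAR cost function and gradient
# minimised with L-BFGS-B, for a toy linear observation operator H.
import numpy as np
import scipy.optimize

n, p = 4, 3                          # state and observation sizes (arbitrary)
rng = np.random.default_rng(0)
H = rng.standard_normal((p, n))      # assumed linear observation operator
xb = np.zeros(n)                     # background state
y = H @ rng.standard_normal(n)       # toy observations
B = np.eye(n)                        # background error covariance
R = 0.1 * np.eye(p)                  # observation error covariance
BI, RI = np.linalg.inv(B), np.linalg.inv(R)

def cost(x):
    # J(x) = Jb + Jo, as in the CostFunction added by the patch
    db, do = x - xb, y - H @ x
    return 0.5 * db @ BI @ db + 0.5 * do @ RI @ do

def grad(x):
    # grad J(x) = B^-1 (x - xb) - H^T R^-1 (y - H x)
    return BI @ (x - xb) - H.T @ (RI @ (y - H @ x))

xa, Jmin, info = scipy.optimize.fmin_l_bfgs_b(cost, xb, fprime=grad,
                                              factr=1.e7, pgtol=1.e-10)
print("analysis xa =", xa)
print("J(xa) =", Jmin, "- function calls:", info["funcalls"])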
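
Likewise, the APosterioriCovariance block builds the analysis-error covariance column by column as the inverse of the Hessian B^-1 + H^T R^-1 H (evaluated through the tangent and adjoint operators) and validates it with a Cholesky factorisation. A minimal equivalent for the explicit linear H of the previous sketch, again only an illustration, could read:

# Standalone illustration (not ADAO code): a posteriori covariance and SPD check,
# reusing H, BI and RI from the sketch above.
Hessian = BI + H.T @ RI @ H          # explicit form of the column-by-column HessienneI loop
A = np.linalg.inv(Hessian)
try:
    np.linalg.cholesky(A)            # raises LinAlgError if A is not symmetric positive-definite
except np.linalg.LinAlgError:
    raise ValueError("A is not symmetric positive-definite: check the B and R a priori covariances.")
print("a posteriori covariance A =")
print(A)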