From 090e7c8476a6998a88750af5ce0240377ee02043 Mon Sep 17 00:00:00 2001
From: Jean-Philippe ARGAUD
Date: Tue, 19 Jun 2012 18:17:10 +0200
Subject: [PATCH] Changing the internal treatment of algorithm parameters

---
 src/daComposant/daAlgorithms/3DVAR.py         | 167 +++++++++---------
 src/daComposant/daAlgorithms/Blue.py          |  53 +++---
 src/daComposant/daAlgorithms/EnsembleBlue.py  |  54 ++++--
 .../daAlgorithms/LinearLeastSquares.py        |  19 +-
 .../daAlgorithms/NonLinearLeastSquares.py     | 152 ++++++++--------
 .../daAlgorithms/QuantileRegression.py        |  83 ++++-----
 src/daComposant/daCore/AssimilationStudy.py   |  73 ++++++--
 src/daComposant/daCore/BasicObjects.py        |  83 ++++++++-
 src/daComposant/daCore/Persistence.py         | 110 ++++++------
 src/daComposant/daDiagnostics/RMS.py          |   2 +-
 10 files changed, 488 insertions(+), 308 deletions(-)
diff --git a/src/daComposant/daAlgorithms/3DVAR.py b/src/daComposant/daAlgorithms/3DVAR.py index 9031667..896bced 100644 --- a/src/daComposant/daAlgorithms/3DVAR.py +++ b/src/daComposant/daAlgorithms/3DVAR.py @@ -38,9 +38,46 @@ else: # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "3DVAR" - logging.debug("%s Initialisation"%self._name) + BasicObjects.Algorithm.__init__(self, "3DVAR") + self.defineRequiredParameter( + name = "Minimizer", + default = "LBFGSB", + typecast = str, + message = "Minimiseur utilisé", + listval = ["LBFGSB","TNC", "CG", "NCG", "BFGS"], + ) + self.defineRequiredParameter( + name = "MaximumNumberOfSteps", + default = 15000, + typecast = int, + message = "Nombre maximal de pas d'optimisation", + minval = -1, + ) + self.defineRequiredParameter( + name = "CostDecrementTolerance", + default = 1.e-7, + typecast = float, + message = "Diminution relative minimale du cout lors de l'arrêt", + ) + self.defineRequiredParameter( + name = "ProjectedGradientTolerance", + default = -1, + typecast = float, + message = "Maximum des composantes du gradient projeté lors de l'arrêt", + minval = -1, + ) + self.defineRequiredParameter( + name = "GradientNormTolerance", + default = 1.e-05, + typecast = float, + message = "Maximum des composantes du gradient lors de l'arrêt", + ) + self.defineRequiredParameter( + name = "CalculateAPosterioriCovariance", + default = False, + typecast = bool, + message = "Calcul de la covariance a posteriori", + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ @@ -49,10 +86,20 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) # + # Paramètres de pilotage + # ---------------------- + self.setParameters(Parameters) + # + if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0): + Bounds = self._parameters["Bounds"] + logging.debug("%s Prise en compte des bornes effectuee"%(self._name,)) + else: + Bounds = None + # # Opérateur d'observation # ----------------------- Hm = H["Direct"].appliedTo - Ht = H["Adjoint"].appliedInXTo + Ha = H["Adjoint"].appliedInXTo # # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- @@ -77,13 +124,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # ---------------------------------- if B is not None: BI = B.I - elif
Parameters["B_scalar"] is not None: - BI = 1.0 / Parameters["B_scalar"] + elif self._parameters["B_scalar"] is not None: + BI = 1.0 / self._parameters["B_scalar"] + else: + raise ValueError("Background error covariance matrix has to be properly defined!") # if R is not None: RI = R.I - elif Parameters["R_scalar"] is not None: - RI = 1.0 / Parameters["R_scalar"] + elif self._parameters["R_scalar"] is not None: + RI = 1.0 / self._parameters["R_scalar"] + else: + raise ValueError("Observation error covariance matrix has to be properly defined!") # # Définition de la fonction-coût # ------------------------------ @@ -110,7 +161,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): _HX = Hm( _X ) _HX = numpy.asmatrix(_HX).flatten().T GradJb = BI * (_X - Xb) - GradJo = - Ht( (_X, RI * (Y - _HX)) ) + GradJo = - Ha( (_X, RI * (Y - _HX)) ) GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten())) logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten())) @@ -125,117 +176,69 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = list(Xb) logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # - # Paramètres de pilotage - # ---------------------- - # Potentiels : "Bounds", "Minimizer", "MaximumNumberOfSteps", "ProjectedGradientTolerance", "GradientNormTolerance", "InnerMinimizer", "CalculateAPosterioriCovariance" - if Parameters.has_key("Bounds") and (type(Parameters["Bounds"]) is type([]) or type(Parameters["Bounds"]) is type(())) and (len(Parameters["Bounds"]) > 0): - Bounds = Parameters["Bounds"] - else: - Bounds = None - MinimizerList = ["LBFGSB","TNC", "CG", "NCG", "BFGS"] - if Parameters.has_key("Minimizer") and (Parameters["Minimizer"] in MinimizerList): - Minimizer = str( Parameters["Minimizer"] ) - else: - Minimizer = "LBFGSB" - logging.warning("%s Unknown or undefined minimizer, replaced by the default one \"%s\""%(self._name,Minimizer)) - logging.debug("%s Minimiseur utilisé = %s"%(self._name, Minimizer)) - if Parameters.has_key("MaximumNumberOfSteps") and (Parameters["MaximumNumberOfSteps"] > -1): - maxiter = int( Parameters["MaximumNumberOfSteps"] ) - else: - maxiter = 15000 - logging.debug("%s Nombre maximal de pas d'optimisation = %s"%(self._name, str(maxiter))) - if Parameters.has_key("CostDecrementTolerance") and (Parameters["CostDecrementTolerance"] > 0): - ftol = float(Parameters["CostDecrementTolerance"]) - factr = ftol * 1.e14 - else: - ftol = 1.e-7 - factr = ftol * 1.e14 - logging.debug("%s Diminution relative minimale du cout lors de l'arret = %s"%(self._name, str(1./factr))) - if Parameters.has_key("ProjectedGradientTolerance") and (Parameters["ProjectedGradientTolerance"] > -1): - pgtol = float(Parameters["ProjectedGradientTolerance"]) - else: - pgtol = -1 - logging.debug("%s Maximum des composantes du gradient projete lors de l'arret = %s"%(self._name, str(pgtol))) - if Parameters.has_key("GradientNormTolerance") and (Parameters["GradientNormTolerance"] > -1): - gtol = float(Parameters["GradientNormTolerance"]) - else: - gtol = 1.e-05 - logging.debug("%s Maximum des composantes du gradient lors de l'arret = %s"%(self._name, str(gtol))) - InnerMinimizerList = ["CG", "NCG", "BFGS"] - if Parameters.has_key("InnerMinimizer") and (Parameters["InnerMinimizer"] in InnerMinimizerList): - InnerMinimizer = str( Parameters["InnerMinimizer"] ) - else: - InnerMinimizer = "BFGS" - 
logging.debug("%s Minimiseur interne utilisé = %s"%(self._name, InnerMinimizer)) - if Parameters.has_key("CalculateAPosterioriCovariance"): - CalculateAPosterioriCovariance = bool(Parameters["CalculateAPosterioriCovariance"]) - else: - CalculateAPosterioriCovariance = False - logging.debug("%s Calcul de la covariance a posteriori = %s"%(self._name, CalculateAPosterioriCovariance)) - # # Minimisation de la fonctionnelle # -------------------------------- - if Minimizer == "LBFGSB": + if self._parameters["Minimizer"] == "LBFGSB": Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b( func = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), bounds = Bounds, - maxfun = maxiter-1, - factr = factr, - pgtol = pgtol, + maxfun = self._parameters["MaximumNumberOfSteps"]-1, + factr = self._parameters["CostDecrementTolerance"]*1.e14, + pgtol = self._parameters["ProjectedGradientTolerance"], iprint = iprint, ) nfeval = Informations['funcalls'] rc = Informations['warnflag'] - elif Minimizer == "TNC": + elif self._parameters["Minimizer"] == "TNC": Minimum, nfeval, rc = scipy.optimize.fmin_tnc( func = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), bounds = Bounds, - maxfun = maxiter, - pgtol = pgtol, - ftol = ftol, + maxfun = self._parameters["MaximumNumberOfSteps"], + pgtol = self._parameters["ProjectedGradientTolerance"], + ftol = self._parameters["CostDecrementTolerance"], messages = message, ) - elif Minimizer == "CG": + elif self._parameters["Minimizer"] == "CG": Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - gtol = gtol, + maxiter = self._parameters["MaximumNumberOfSteps"], + gtol = self._parameters["GradientNormTolerance"], disp = disp, full_output = True, ) - elif Minimizer == "NCG": + elif self._parameters["Minimizer"] == "NCG": Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - avextol = ftol, + maxiter = self._parameters["MaximumNumberOfSteps"], + avextol = self._parameters["CostDecrementTolerance"], disp = disp, full_output = True, ) - elif Minimizer == "BFGS": + elif self._parameters["Minimizer"] == "BFGS": Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - gtol = gtol, + maxiter = self._parameters["MaximumNumberOfSteps"], + gtol = self._parameters["GradientNormTolerance"], disp = disp, full_output = True, ) else: - raise ValueError("Error in Minimizer name: %s"%Minimizer) + raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"]) # # Correction pour pallier a un bug de TNC sur le retour du Minimum # ---------------------------------------------------------------- @@ -243,11 +246,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): MinJ = self.StoredVariables["CostFunctionJ"].valueserie(step = StepMin) Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin) # - logging.debug("%s %s Step of min cost = %s"%(self._name, Minimizer, StepMin)) - logging.debug("%s %s Minimum cost = %s"%(self._name, Minimizer, MinJ)) - logging.debug("%s %s Minimum state = %s"%(self._name, Minimizer, Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, Minimizer, nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, Minimizer, rc)) + logging.debug("%s %s Step of min 
cost = %s"%(self._name, self._parameters["Minimizer"], StepMin)) + logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], MinJ)) + logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) + logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) + logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) # # Obtention de l'analyse # ---------------------- @@ -259,13 +262,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Calcul de la covariance d'analyse # --------------------------------- - if CalculateAPosterioriCovariance: + if self._parameters["CalculateAPosterioriCovariance"]: Hessienne = [] nb = len(Xini) for i in range(nb): ee = numpy.matrix(numpy.zeros(nb)).T ee[i] = 1. - Hessienne.append( ( BI*ee + Ht((Xa,RI*Hm(ee))) ).A1 ) + Hessienne.append( ( BI*ee + Ha((Xa,RI*Hm(ee))) ).A1 ) Hessienne = numpy.matrix( Hessienne ) A = Hessienne.I self.StoredVariables["APosterioriCovariance"].store( A ) diff --git a/src/daComposant/daAlgorithms/Blue.py b/src/daComposant/daAlgorithms/Blue.py index 4ab6a00..1f22b5f 100644 --- a/src/daComposant/daAlgorithms/Blue.py +++ b/src/daComposant/daAlgorithms/Blue.py @@ -28,9 +28,13 @@ import numpy # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "BLUE" - logging.debug("%s Initialisation"%self._name) + BasicObjects.Algorithm.__init__(self, "BLUE") + self.defineRequiredParameter( + name = "CalculateAPosterioriCovariance", + default = False, + typecast = bool, + message = "Calcul de la covariance a posteriori", + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ @@ -39,8 +43,14 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) # + # Paramètres de pilotage + # ---------------------- + self.setParameters(Parameters) + # + # Opérateur d'observation + # ----------------------- Hm = H["Direct"].asMatrix() - Ht = H["Adjoint"].asMatrix() + Ha = H["Adjoint"].asMatrix() # # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- @@ -56,14 +66,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # ---------------------------------- if B is not None: BI = B.I - elif Parameters["B_scalar"] is not None: - BI = 1.0 / Parameters["B_scalar"] - B = Parameters["B_scalar"] + elif self._parameters["B_scalar"] is not None: + BI = 1.0 / self._parameters["B_scalar"] + B = self._parameters["B_scalar"] + else: + raise ValueError("Background error covariance matrix has to be properly defined!") + # if R is not None: RI = R.I - elif Parameters["R_scalar"] is not None: - RI = 1.0 / Parameters["R_scalar"] - R = Parameters["R_scalar"] + elif self._parameters["R_scalar"] is not None: + RI = 1.0 / self._parameters["R_scalar"] + else: + raise ValueError("Observation error covariance matrix has to be properly defined!") # # Calcul de l'innovation # ---------------------- @@ -74,23 +88,16 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): d = Y - HXb logging.debug("%s Innovation d = %s"%(self._name, d)) # - # Paramètres de pilotage - # ---------------------- - # Potentiels : "CalculateAPosterioriCovariance" - if Parameters.has_key("CalculateAPosterioriCovariance"): - 
CalculateAPosterioriCovariance = bool(Parameters["CalculateAPosterioriCovariance"]) - else: - CalculateAPosterioriCovariance = False - logging.debug("%s Calcul de la covariance a posteriori = %s"%(self._name, CalculateAPosterioriCovariance)) - # # Calcul de la matrice de gain dans l'espace le plus petit et de l'analyse # ------------------------------------------------------------------------ if Y.size <= Xb.size: + if self._parameters["R_scalar"] is not None: + R = self._parameters["R_scalar"] * numpy.eye(len(Y), dtype=numpy.float) logging.debug("%s Calcul de K dans l'espace des observations"%self._name) - K = B * Ht * (Hm * B * Ht + R).I + K = B * Ha * (Hm * B * Ha + R).I else: logging.debug("%s Calcul de K dans l'espace d'ébauche"%self._name) - K = (Ht * RI * Hm + BI).I * Ht * RI + K = (Ha * RI * Hm + BI).I * Ha * RI Xa = Xb + K*d logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) # @@ -111,11 +118,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Calcul de la covariance d'analyse # --------------------------------- - if CalculateAPosterioriCovariance: + if self._parameters["CalculateAPosterioriCovariance"]: A = ( 1.0 - K * Hm ) * B self.StoredVariables["APosterioriCovariance"].store( A ) # - logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("MB"))) + logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) logging.debug("%s Terminé"%self._name) # return 0 diff --git a/src/daComposant/daAlgorithms/EnsembleBlue.py b/src/daComposant/daAlgorithms/EnsembleBlue.py index 521060b..6ab3eff 100644 --- a/src/daComposant/daAlgorithms/EnsembleBlue.py +++ b/src/daComposant/daAlgorithms/EnsembleBlue.py @@ -28,9 +28,12 @@ import numpy # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "ENSEMBLEBLUE" - logging.debug("%s Initialisation"%self._name) + BasicObjects.Algorithm.__init__(self, "ENSEMBLEBLUE") + self.defineRequiredParameter( + name = "SetSeed", + typecast = numpy.random.seed, + message = "Graine fixée pour le générateur aléatoire", + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None ): """ @@ -44,12 +47,24 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # # Paramètres de pilotage # ---------------------- - # Potentiels : "SetSeed" - if Parameters.has_key("SetSeed"): - numpy.random.seed(int(Parameters["SetSeed"])) - logging.debug("%s Graine fixee pour le generateur aleatoire = %s"%(self._name, int(Parameters["SetSeed"]))) + self.setParameters(Parameters) + # + # Précalcul des inversions de B et R + # ---------------------------------- + if B is not None: + BI = B.I + elif self._parameters["B_scalar"] is not None: + BI = 1.0 / self._parameters["B_scalar"] + B = self._parameters["B_scalar"] + else: + raise ValueError("Background error covariance matrix has to be properly defined!") + # + if R is not None: + RI = R.I + elif self._parameters["R_scalar"] is not None: + RI = 1.0 / self._parameters["R_scalar"] else: - logging.debug("%s Graine quelconque pour le generateur aleatoire"%(self._name, )) + raise ValueError("Observation error covariance matrix has to be properly defined!") # # Nombre d'ensemble pour l'ébauche # -------------------------------- @@ -68,9 +83,18 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # Initialisation des opérateurs d'observation et de la matrice gain # 
----------------------------------------------------------------- Hm = H["Direct"].asMatrix() - Ht = H["Adjoint"].asMatrix() + Ha = H["Adjoint"].asMatrix() # - K = B * Ht * (Hm * B * Ht + R).I + # Calcul de la matrice de gain dans l'espace le plus petit et de l'analyse + # ------------------------------------------------------------------------ + if Y.size <= Xb.valueserie(0).size: + if self._parameters["R_scalar"] is not None: + R = self._parameters["R_scalar"] * numpy.eye(len(Y), dtype=numpy.float) + logging.debug("%s Calcul de K dans l'espace des observations"%self._name) + K = B * Ha * (Hm * B * Ha + R).I + else: + logging.debug("%s Calcul de K dans l'espace d'ébauche"%self._name) + K = (Ha * RI * Hm + BI).I * Ha * RI # # Calcul du BLUE pour chaque membre de l'ensemble # ----------------------------------------------- @@ -78,9 +102,15 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): d = EnsembleY[:,iens] - Hm * Xb.valueserie(iens) Xa = Xb.valueserie(iens) + K*d - self.StoredVariables["Analysis"].store( Xa.A1 ) + self.StoredVariables["CurrentState"].store( Xa.A1 ) self.StoredVariables["Innovation"].store( d.A1 ) # + # Fabrication de l'analyse + # ------------------------ + Members = self.StoredVariables["CurrentState"].valueserie()[-nb_ens:] + Xa = numpy.matrix( Members ).mean(axis=0) + self.StoredVariables["Analysis"].store( Xa.A1 ) + # logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) logging.debug("%s Terminé"%self._name) return 0 @@ -88,5 +118,3 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # ============================================================================== if __name__ == "__main__": print '\n AUTODIAGNOSTIC \n' - - diff --git a/src/daComposant/daAlgorithms/LinearLeastSquares.py b/src/daComposant/daAlgorithms/LinearLeastSquares.py index 6b7b4ab..bf8b289 100644 --- a/src/daComposant/daAlgorithms/LinearLeastSquares.py +++ b/src/daComposant/daAlgorithms/LinearLeastSquares.py @@ -26,8 +26,7 @@ m = PlatformInfo.SystemUsage() # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "LINEARLEASTSQUARES" + BasicObjects.Algorithm.__init__(self, "LINEARLEASTSQUARES") def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ @@ -37,15 +36,23 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) # + # Paramètres de pilotage + # ---------------------- + self.setParameters(Parameters) + # + # Opérateur d'observation + # ----------------------- Hm = H["Direct"].asMatrix() - Ht = H["Adjoint"].asMatrix() + Ha = H["Adjoint"].asMatrix() # if R is not None: RI = R.I - elif Parameters["R_scalar"] is not None: - RI = 1.0 / Parameters["R_scalar"] + elif self._parameters["R_scalar"] is not None: + RI = 1.0 / self._parameters["R_scalar"] + else: + raise ValueError("Observation error covariance matrix has to be properly defined!") # - K = (Ht * RI * Hm ).I * Ht * RI + K = (Ha * RI * Hm ).I * Ha * RI Xa = K * Y logging.debug("%s Analyse Xa = %s"%(self._name, Xa)) # diff --git a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py index 2578225..03cf56b 100644 --- a/src/daComposant/daAlgorithms/NonLinearLeastSquares.py +++ b/src/daComposant/daAlgorithms/NonLinearLeastSquares.py 
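Both gain computations switched on above are algebraically the same BLUE gain; the test on Y.size only picks the smaller space in which to invert a matrix, and LinearLeastSquares uses the state-space form with the background term dropped. A quick numpy check of that equivalence, with toy shapes and covariance values chosen arbitrarily:

    import numpy
    Hm = numpy.matrix([[1., 0., 0.], [0., 1., 0.]])   # 2 observations, 3 state components
    B  = 2.0 * numpy.matrix(numpy.eye(3))             # background error covariance
    R  = 0.5 * numpy.matrix(numpy.eye(2))             # observation error covariance
    Ha, BI, RI = Hm.T, B.I, R.I                       # adjoint (here a transpose) and inverses
    K_obs   = B * Ha * (Hm * B * Ha + R).I            # inversion in observation space
    K_state = (Ha * RI * Hm + BI).I * Ha * RI         # inversion in state space
    assert numpy.allclose(K_obs, K_state)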
@@ -38,9 +38,40 @@ else: # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "NONLINEARLEASTSQUARES" - logging.debug("%s Initialisation"%self._name) + BasicObjects.Algorithm.__init__(self, "NONLINEARLEASTSQUARES") + self.defineRequiredParameter( + name = "Minimizer", + default = "LBFGSB", + typecast = str, + message = "Minimiseur utilisé", + listval = ["LBFGSB","TNC", "CG", "NCG", "BFGS"], + ) + self.defineRequiredParameter( + name = "MaximumNumberOfSteps", + default = 15000, + typecast = int, + message = "Nombre maximal de pas d'optimisation", + minval = -1 + ) + self.defineRequiredParameter( + name = "CostDecrementTolerance", + default = 1.e-7, + typecast = float, + message = "Diminution relative minimale du cout lors de l'arrêt", + ) + self.defineRequiredParameter( + name = "ProjectedGradientTolerance", + default = -1, + typecast = float, + message = "Maximum des composantes du gradient projeté lors de l'arrêt", + minval = -1, + ) + self.defineRequiredParameter( + name = "GradientNormTolerance", + default = 1.e-05, + typecast = float, + message = "Maximum des composantes du gradient lors de l'arrêt", + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ @@ -50,10 +81,20 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) # + # Paramètres de pilotage + # ---------------------- + self.setParameters(Parameters) + # + if self._parameters.has_key("Bounds") and (type(self._parameters["Bounds"]) is type([]) or type(self._parameters["Bounds"]) is type(())) and (len(self._parameters["Bounds"]) > 0): + Bounds = self._parameters["Bounds"] + logging.debug("%s Prise en compte des bornes effectuee"%(self._name,)) + else: + Bounds = None + # # Opérateur d'observation # ----------------------- Hm = H["Direct"].appliedTo - Ht = H["Adjoint"].appliedInXTo + Ha = H["Adjoint"].appliedInXTo # # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- @@ -78,13 +119,17 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): # ---------------------------------- # if B is not None: # BI = B.I - # elif Parameters["B_scalar"] is not None: - # BI = 1.0 / Parameters["B_scalar"] + # elif self._parameters["B_scalar"] is not None: + # BI = 1.0 / self._parameters["B_scalar"] + # else: + # raise ValueError("Background error covariance matrix has to be properly defined!") # if R is not None: RI = R.I - elif Parameters["R_scalar"] is not None: - RI = 1.0 / Parameters["R_scalar"] + elif self._parameters["R_scalar"] is not None: + RI = 1.0 / self._parameters["R_scalar"] + else: + raise ValueError("Observation error covariance matrix has to be properly defined!") # # Définition de la fonction-coût # ------------------------------ @@ -111,7 +156,7 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): _HX = Hm( _X ) _HX = numpy.asmatrix(_HX).flatten().T GradJb = 0. 
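        # Note: with no background term in the least squares functional, GradJb
        # stays zero and the whole gradient reduces to the observation part GradJo,
        # the adjoint applied at _X to the weighted innovation RI * (Y - H(_X)).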
- GradJo = - Ht( (_X, RI * (Y - _HX)) ) + GradJo = - Ha( (_X, RI * (Y - _HX)) ) GradJ = numpy.asmatrix( GradJb ).flatten().T + numpy.asmatrix( GradJo ).flatten().T logging.debug("%s GradientOfCostFunction GradJb = %s"%(self._name, numpy.asmatrix( GradJb ).flatten())) logging.debug("%s GradientOfCostFunction GradJo = %s"%(self._name, numpy.asmatrix( GradJo ).flatten())) @@ -126,112 +171,69 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = list(Xb) logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # - # Paramètres de pilotage - # ---------------------- - # Potentiels : "Bounds", "Minimizer", "MaximumNumberOfSteps", "ProjectedGradientTolerance", "GradientNormTolerance", "InnerMinimizer" - if Parameters.has_key("Bounds") and (type(Parameters["Bounds"]) is type([]) or type(Parameters["Bounds"]) is type(())) and (len(Parameters["Bounds"]) > 0): - Bounds = Parameters["Bounds"] - else: - Bounds = None - MinimizerList = ["LBFGSB","TNC", "CG", "NCG", "BFGS"] - if Parameters.has_key("Minimizer") and (Parameters["Minimizer"] in MinimizerList): - Minimizer = str( Parameters["Minimizer"] ) - else: - Minimizer = "LBFGSB" - logging.warning("%s Unknown or undefined minimizer, replaced by the default one \"%s\""%(self._name,Minimizer)) - logging.debug("%s Minimiseur utilisé = %s"%(self._name, Minimizer)) - if Parameters.has_key("MaximumNumberOfSteps") and (Parameters["MaximumNumberOfSteps"] > -1): - maxiter = int( Parameters["MaximumNumberOfSteps"] ) - else: - maxiter = 15000 - logging.debug("%s Nombre maximal de pas d'optimisation = %s"%(self._name, str(maxiter))) - if Parameters.has_key("CostDecrementTolerance") and (Parameters["CostDecrementTolerance"] > 0): - ftol = float(Parameters["CostDecrementTolerance"]) - factr = ftol * 1.e14 - else: - ftol = 1.e-7 - factr = ftol * 1.e14 - logging.debug("%s Diminution relative minimale du cout lors de l'arret = %s"%(self._name, str(1./factr))) - if Parameters.has_key("ProjectedGradientTolerance") and (Parameters["ProjectedGradientTolerance"] > -1): - pgtol = float(Parameters["ProjectedGradientTolerance"]) - else: - pgtol = -1 - logging.debug("%s Maximum des composantes du gradient projete lors de l'arret = %s"%(self._name, str(pgtol))) - if Parameters.has_key("GradientNormTolerance") and (Parameters["GradientNormTolerance"] > -1): - gtol = float(Parameters["GradientNormTolerance"]) - else: - gtol = 1.e-05 - logging.debug("%s Maximum des composantes du gradient lors de l'arret = %s"%(self._name, str(gtol))) - InnerMinimizerList = ["CG", "NCG", "BFGS"] - if Parameters.has_key("InnerMinimizer") and (Parameters["InnerMinimizer"] in InnerMinimizerList): - InnerMinimizer = str( Parameters["InnerMinimizer"] ) - else: - InnerMinimizer = "BFGS" - logging.debug("%s Minimiseur interne utilisé = %s"%(self._name, InnerMinimizer)) - # # Minimisation de la fonctionnelle # -------------------------------- - if Minimizer == "LBFGSB": + if self._parameters["Minimizer"] == "LBFGSB": Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b( func = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), bounds = Bounds, - maxfun = maxiter-1, - factr = factr, - pgtol = pgtol, + maxfun = self._parameters["MaximumNumberOfSteps"]-1, + factr = self._parameters["CostDecrementTolerance"]*1.e14, + pgtol = self._parameters["ProjectedGradientTolerance"], iprint = iprint, ) nfeval = Informations['funcalls'] rc = Informations['warnflag'] - elif Minimizer == "TNC": + elif self._parameters["Minimizer"] == "TNC": Minimum, nfeval, rc = 
scipy.optimize.fmin_tnc( func = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), bounds = Bounds, - maxfun = maxiter, - pgtol = pgtol, - ftol = ftol, + maxfun = self._parameters["MaximumNumberOfSteps"], + pgtol = self._parameters["ProjectedGradientTolerance"], + ftol = self._parameters["CostDecrementTolerance"], messages = message, ) - elif Minimizer == "CG": + elif self._parameters["Minimizer"] == "CG": Minimum, fopt, nfeval, grad_calls, rc = scipy.optimize.fmin_cg( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - gtol = gtol, + maxiter = self._parameters["MaximumNumberOfSteps"], + gtol = self._parameters["GradientNormTolerance"], disp = disp, full_output = True, ) - elif Minimizer == "NCG": + elif self._parameters["Minimizer"] == "NCG": Minimum, fopt, nfeval, grad_calls, hcalls, rc = scipy.optimize.fmin_ncg( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - avextol = ftol, + maxiter = self._parameters["MaximumNumberOfSteps"], + avextol = self._parameters["CostDecrementTolerance"], disp = disp, full_output = True, ) - elif Minimizer == "BFGS": + elif self._parameters["Minimizer"] == "BFGS": Minimum, fopt, gopt, Hopt, nfeval, grad_calls, rc = scipy.optimize.fmin_bfgs( f = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, args = (), - maxiter = maxiter, - gtol = gtol, + maxiter = self._parameters["MaximumNumberOfSteps"], + gtol = self._parameters["GradientNormTolerance"], disp = disp, full_output = True, ) else: - raise ValueError("Error in Minimizer name: %s"%Minimizer) + raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"]) # # Correction pour pallier a un bug de TNC sur le retour du Minimum # ---------------------------------------------------------------- @@ -239,11 +241,11 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): MinJ = self.StoredVariables["CostFunctionJ"].valueserie(step = StepMin) Minimum = self.StoredVariables["CurrentState"].valueserie(step = StepMin) # - logging.debug("%s %s Step of min cost = %s"%(self._name, Minimizer, StepMin)) - logging.debug("%s %s Minimum cost = %s"%(self._name, Minimizer, MinJ)) - logging.debug("%s %s Minimum state = %s"%(self._name, Minimizer, Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, Minimizer, nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, Minimizer, rc)) + logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["Minimizer"], StepMin)) + logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], MinJ)) + logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) + logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) + logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) # # Obtention de l'analyse # ---------------------- diff --git a/src/daComposant/daAlgorithms/QuantileRegression.py b/src/daComposant/daAlgorithms/QuantileRegression.py index d72157c..78a602f 100644 --- a/src/daComposant/daAlgorithms/QuantileRegression.py +++ b/src/daComposant/daAlgorithms/QuantileRegression.py @@ -28,9 +28,35 @@ import numpy # ============================================================================== class ElementaryAlgorithm(BasicObjects.Algorithm): def __init__(self): - BasicObjects.Algorithm.__init__(self) - self._name = "QUANTILEREGRESSION" - logging.debug("%s Initialisation"%self._name) + BasicObjects.Algorithm.__init__(self, 
"QUANTILEREGRESSION") + self.defineRequiredParameter( + name = "Quantile", + default = 0.5, + typecast = float, + message = "Quantile pour la regression de quantile", + minval = 0., + maxval = 1., + ) + self.defineRequiredParameter( + name = "Minimizer", + default = "MMQR", + typecast = str, + message = "Minimiseur utilisé", + listval = ["MMQR"], + ) + self.defineRequiredParameter( + name = "MaximumNumberOfSteps", + default = 15000, + typecast = int, + message = "Nombre maximal de pas d'optimisation", + minval = -1 + ) + self.defineRequiredParameter( + name = "CostDecrementTolerance", + default = 1.e-6, + typecast = float, + message = "Maximum de variation de la fonction d'estimation lors de l'arrêt", + ) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ @@ -39,10 +65,13 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): logging.debug("%s Lancement"%self._name) logging.debug("%s Taille mémoire utilisée de %.1f Mo"%(self._name, m.getUsedMemory("Mo"))) # + # Paramètres de pilotage + # ---------------------- + self.setParameters(Parameters) + # # Opérateur d'observation # ----------------------- Hm = H["Direct"].appliedTo - Ht = H["Adjoint"].appliedInXTo # # Utilisation éventuelle d'un vecteur H(Xb) précalculé # ---------------------------------------------------- @@ -96,55 +125,29 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): Xini = list(Xb) logging.debug("%s Point de démarrage Xini = %s"%(self._name, Xini)) # - # Paramètres de pilotage - # ---------------------- - # Potentiels : "Quantile", "Minimizer", "MaximumNumberOfSteps", "CostDecrementTolerance" - if Parameters.has_key("Quantile") and (0. <= Parameters["Quantile"] <= 1.): - quantile = float(Parameters["Quantile"]) - else: - quantile = 0.5 - logging.debug("%s Quantile pour la regression de quantile = %s"%(self._name, str(quantile))) - MinimizerList = ["MMQR",] - if Parameters.has_key("Minimizer") and (Parameters["Minimizer"] in MinimizerList): - Minimizer = str( Parameters["Minimizer"] ) - else: - Minimizer = "MMQR" - logging.warning("%s Unknown or undefined minimizer, replaced by the default one \"%s\""%(self._name,Minimizer)) - logging.debug("%s Minimiseur utilisé = %s"%(self._name, Minimizer)) - if Parameters.has_key("MaximumNumberOfSteps") and (Parameters["MaximumNumberOfSteps"] > -1): - maxiter = int( Parameters["MaximumNumberOfSteps"] ) - else: - maxiter = 15000 - logging.debug("%s Nombre maximal de pas d'optimisation = %s"%(self._name, str(maxiter))) - if Parameters.has_key("CostDecrementTolerance") and (Parameters["CostDecrementTolerance"] > 0): - ftol = float(Parameters["CostDecrementTolerance"]) - else: - ftol = 1.e-06 - logging.debug("%s Maximum de variation de la fonction d'estimation lors de l'arrêt = %s"%(self._name, str(ftol))) - # # Minimisation de la fonctionnelle # -------------------------------- - if Minimizer == "MMQR": + if self._parameters["Minimizer"] == "MMQR": import mmqr Minimum, J_optimal, Informations = mmqr.mmqr( func = CostFunction, x0 = Xini, fprime = GradientOfCostFunction, - quantile = quantile, - maxfun = maxiter, - toler = ftol, + quantile = self._parameters["Quantile"], + maxfun = self._parameters["MaximumNumberOfSteps"], + toler = self._parameters["CostDecrementTolerance"], y = Y, ) nfeval = Informations[2] rc = Informations[4] else: - raise ValueError("Error in Minimizer name: %s"%Minimizer) + raise ValueError("Error in Minimizer name: %s"%self._parameters["Minimizer"]) # - logging.debug("%s %s Step of min cost = %s"%(self._name, Minimizer, 
nfeval)) - logging.debug("%s %s Minimum cost = %s"%(self._name, Minimizer, J_optimal)) - logging.debug("%s %s Minimum state = %s"%(self._name, Minimizer, Minimum)) - logging.debug("%s %s Nb of F = %s"%(self._name, Minimizer, nfeval)) - logging.debug("%s %s RetCode = %s"%(self._name, Minimizer, rc)) + logging.debug("%s %s Step of min cost = %s"%(self._name, self._parameters["Minimizer"], nfeval)) + logging.debug("%s %s Minimum cost = %s"%(self._name, self._parameters["Minimizer"], J_optimal)) + logging.debug("%s %s Minimum state = %s"%(self._name, self._parameters["Minimizer"], Minimum)) + logging.debug("%s %s Nb of F = %s"%(self._name, self._parameters["Minimizer"], nfeval)) + logging.debug("%s %s RetCode = %s"%(self._name, self._parameters["Minimizer"], rc)) # # Obtention de l'analyse # ---------------------- diff --git a/src/daComposant/daCore/AssimilationStudy.py b/src/daComposant/daCore/AssimilationStudy.py index 58d48b6..70ec582 100644 --- a/src/daComposant/daCore/AssimilationStudy.py +++ b/src/daComposant/daCore/AssimilationStudy.py @@ -135,6 +135,7 @@ class AssimilationStudy: def setBackgroundError(self, asCovariance = None, asEyeByScalar = None, + asEyeByVector = None, toBeStored = False, ): """ @@ -144,15 +145,23 @@ class AssimilationStudy: - asEyeByScalar : entrée des données comme un seul scalaire de variance, multiplicatif d'une matrice de corrélation identité, aucune matrice n'étant donc explicitement à donner + - asEyeByVector : entrée des données comme un seul vecteur de variance, + à mettre sur la diagonale d'une matrice de corrélation, aucune matrice + n'étant donc explicitement à donner - toBeStored : booléen indiquant si la donnée d'entrée est sauvée pour être rendue disponible au même titre que les variables de calcul """ if asEyeByScalar is not None: self.__B_scalar = float(asEyeByScalar) self.__B = None - else: + elif asEyeByVector is not None: + self.__B_scalar = None + self.__B = numpy.matrix( numpy.diagflat( asEyeByVector ), numpy.float ) + elif asCovariance is not None: self.__B_scalar = None - self.__B = numpy.matrix( asCovariance, numpy.float ) + self.__B = numpy.matrix( asCovariance, numpy.float ) + else: + raise ValueError("Background error covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix") # self.__Parameters["B_scalar"] = self.__B_scalar if toBeStored: @@ -192,6 +201,7 @@ class AssimilationStudy: def setObservationError(self, asCovariance = None, asEyeByScalar = None, + asEyeByVector = None, toBeStored = False, ): """ @@ -201,15 +211,23 @@ class AssimilationStudy: - asEyeByScalar : entrée des données comme un seul scalaire de variance, multiplicatif d'une matrice de corrélation identité, aucune matrice n'étant donc explicitement à donner + - asEyeByVector : entrée des données comme un seul vecteur de variance, + à mettre sur la diagonale d'une matrice de corrélation, aucune matrice + n'étant donc explicitement à donner - toBeStored : booléen indiquant si la donnée d'entrée est sauvée pour être rendue disponible au même titre que les variables de calcul """ if asEyeByScalar is not None: self.__R_scalar = float(asEyeByScalar) self.__R = None - else: + elif asEyeByVector is not None: self.__R_scalar = None - self.__R = numpy.matrix( asCovariance, numpy.float ) + self.__R = numpy.matrix( numpy.diagflat( asEyeByVector ), numpy.float ) + elif asCovariance is not None: + self.__R_scalar = None + self.__R = numpy.matrix( asCovariance, numpy.float ) + else: + raise ValueError("Observation 
error covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix") # self.__Parameters["R_scalar"] = self.__R_scalar if toBeStored: @@ -244,18 +262,18 @@ class AssimilationStudy: asFunction.has_key("Tangent") and asFunction.has_key("Adjoint") and \ (asFunction["Tangent"] is not None) and (asFunction["Adjoint"] is not None): if not asFunction.has_key("Direct") or (asFunction["Direct"] is None): - self.__H["Direct"] = Operator( fromMethod = asFunction["Tangent"] ) + self.__H["Direct"] = Operator( fromMethod = asFunction["Tangent"] ) else: self.__H["Direct"] = Operator( fromMethod = asFunction["Direct"] ) self.__H["Tangent"] = Operator( fromMethod = asFunction["Tangent"] ) self.__H["Adjoint"] = Operator( fromMethod = asFunction["Adjoint"] ) elif asMatrix is not None: - mat = numpy.matrix( asMatrix, numpy.float ) - self.__H["Direct"] = Operator( fromMatrix = mat ) - self.__H["Tangent"] = Operator( fromMatrix = mat ) - self.__H["Adjoint"] = Operator( fromMatrix = mat.T ) + matrice = numpy.matrix( asMatrix, numpy.float ) + self.__H["Direct"] = Operator( fromMatrix = matrice ) + self.__H["Tangent"] = Operator( fromMatrix = matrice ) + self.__H["Adjoint"] = Operator( fromMatrix = matrice.T ) else: - raise ValueError("Error: improperly defined observation operator") + raise ValueError("Improperly defined observation operator, it requires at minima either a matrix or a Tangent/Adjoint pair.") # if appliedToX is not None: self.__H["AppliedToX"] = {} @@ -298,7 +316,9 @@ class AssimilationStudy: - toBeStored : booléen indiquant si la donnée d'entrée est sauvée pour être rendue disponible au même titre que les variables de calcul """ - if (type(asFunction) is type({})) and (asFunction["Tangent"] is not None) and (asFunction["Adjoint"] is not None): + if (type(asFunction) is type({})) and \ + asFunction.has_key("Tangent") and asFunction.has_key("Adjoint") and \ + (asFunction["Tangent"] is not None) and (asFunction["Adjoint"] is not None): if not asFunction.has_key("Direct") or (asFunction["Direct"] is None): self.__M["Direct"] = Operator( fromMethod = asFunction["Tangent"] ) else: @@ -311,7 +331,7 @@ class AssimilationStudy: self.__M["Tangent"] = Operator( fromMatrix = matrice ) self.__M["Adjoint"] = Operator( fromMatrix = matrice.T ) else: - raise ValueError("Error: improperly defined evolution operator") + raise ValueError("Improperly defined evolution operator, it requires at minima either a matrix or a Tangent/Adjoint pair.") # if toBeStored: self.__StoredInputs["EvolutionModel"] = self.__M @@ -320,6 +340,7 @@ class AssimilationStudy: def setEvolutionError(self, asCovariance = None, asEyeByScalar = None, + asEyeByVector = None, toBeStored = False, ): """ @@ -329,15 +350,23 @@ class AssimilationStudy: - asEyeByScalar : entrée des données comme un seul scalaire de variance, multiplicatif d'une matrice de corrélation identité, aucune matrice n'étant donc explicitement à donner + - asEyeByVector : entrée des données comme un seul vecteur de variance, + à mettre sur la diagonale d'une matrice de corrélation, aucune matrice + n'étant donc explicitement à donner - toBeStored : booléen indiquant si la donnée d'entrée est sauvée pour être rendue disponible au même titre que les variables de calcul """ if asEyeByScalar is not None: self.__Q_scalar = float(asEyeByScalar) self.__Q = None - else: + elif asEyeByVector is not None: self.__Q_scalar = None - self.__Q = numpy.matrix( asCovariance, numpy.float ) + self.__Q = numpy.matrix( 
numpy.diagflat( asEyeByVector ), numpy.float ) + elif asCovariance is not None: + self.__Q_scalar = None + self.__Q = numpy.matrix( asCovariance, numpy.float ) + else: + raise ValueError("Evolution error covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix") # self.__Parameters["Q_scalar"] = self.__Q_scalar if toBeStored: @@ -401,7 +430,7 @@ class AssimilationStudy: # Instancie un objet du type élémentaire du fichier # ------------------------------------------------- self.__algorithm = self.__algorithmFile.ElementaryAlgorithm() - self.__StoredInputs["AlgorithmName"] = str(choice) + self.__StoredInputs["AlgorithmName"] = self.__algorithmName return 0 def setAlgorithmParameters(self, asDico=None): @@ -414,6 +443,12 @@ class AssimilationStudy: # self.__StoredInputs["AlgorithmParameters"] = self.__Parameters return 0 + + def getAlgorithmParameters(self, noDetails=True): + """ + Renvoie la liste des paramètres requis selon l'algorithme + """ + return self.__algorithm.getRequiredParameters(noDetails) # ----------------------------------------------------------- def setDiagnostic(self, choice = None, name = "", unit = "", basetype = None, parameters = {} ): @@ -824,6 +859,14 @@ if __name__ == "__main__": # print " Chemin des diagnostics.....................:", ADD.get_diagnostics_main_path() print "Variables disponibles.........................:", ADD.get_available_variables() print + + print "Paramètres requis par algorithme :" + for algo in ADD.get_available_algorithms(): + tmpADD = AssimilationStudy("Un algorithme") + tmpADD.setAlgorithm(choice=algo) + print " %25s : %s"%(algo,tmpADD.getAlgorithmParameters()) + del tmpADD + print ADD.setDiagnostic("RMS", "Ma RMS") diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 2533079..650ad22 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -28,6 +28,7 @@ __doc__ = """ """ __author__ = "Jean-Philippe ARGAUD" +import logging import numpy import Persistence @@ -47,12 +48,18 @@ class Operator: if fromMethod is not None: self.__Method = fromMethod self.__Matrix = None + self.__Type = "Method" elif fromMatrix is not None: self.__Method = None self.__Matrix = numpy.matrix( fromMatrix, numpy.float ) + self.__Type = "Matrix" else: self.__Method = None self.__Matrix = None + self.__Type = None + + def isType(self): + return self.__Type def appliedTo(self, xValue): """ @@ -117,7 +124,7 @@ class Algorithm: Une classe élémentaire d'algorithme doit implémenter la méthode "run". """ - def __init__(self): + def __init__(self, name): """ L'initialisation présente permet de fabriquer des variables de stockage disponibles de manière générique dans les algorithmes élémentaires. 
Ces @@ -143,7 +150,10 @@ class Algorithm: On peut rajouter des variables à stocker dans l'initialisation de l'algorithme élémentaire qui va hériter de cette classe """ - self._name = None + logging.debug("%s Initialisation"%str(name)) + self._name = str( name ) + self._parameters = {} + self.__required_parameters = {} self.StoredVariables = {} # self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ") @@ -194,6 +204,75 @@ class Algorithm: """ raise NotImplementedError("Mathematical assimilation calculation has not been implemented!") + def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None): + """ + Permet de définir dans l'algorithme des paramètres requis et leurs + caractéristiques par défaut. + """ + if name is None: + raise ValueError("A name is mandatory to define a required parameter.") + # + self.__required_parameters[name] = { + "default" : default, + "typecast" : typecast, + "minval" : minval, + "maxval" : maxval, + "listval" : listval, + "message" : message, + } + logging.debug("%s %s (valeur par défaut = %s)"%(self._name, message, self.setParameterValue(name))) + + def getRequiredParameters(self, noDetails=True): + """ + Renvoie la liste des noms de paramètres requis ou directement le + dictionnaire des paramètres requis. + """ + if noDetails: + ks = self.__required_parameters.keys() + ks.sort() + return ks + else: + return self.__required_parameters + + def setParameterValue(self, name=None, value=None): + """ + Renvoie la valeur d'un paramètre requis de manière contrôlée + """ + default = self.__required_parameters[name]["default"] + typecast = self.__required_parameters[name]["typecast"] + minval = self.__required_parameters[name]["minval"] + maxval = self.__required_parameters[name]["maxval"] + listval = self.__required_parameters[name]["listval"] + # + if value is None and default is None: + __val = None + elif value is None and default is not None: + if typecast is None: __val = default + else: __val = typecast( default ) + else: + if typecast is None: __val = value + else: __val = typecast( value ) + # + if minval is not None and __val < minval: + raise ValueError("The parameter named \"%s\" of value %s can not be less than %s."%(name, __val, minval)) + if maxval is not None and __val > maxval: + raise ValueError("The parameter named \"%s\" of value %s can not be greater than %s."%(name, __val, maxval)) + if listval is not None and __val not in listval: + raise ValueError("The parameter named \"%s\" of value %s has to be in the list %s."%(name, __val, listval)) + return __val + + def setParameters(self, fromDico={}): + """ + Permet de stocker les paramètres reçus dans le dictionnaire interne. 
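+
+        A minimal usage sketch (parameter names as declared by the calling
+        algorithms, values illustrative):
+            self.defineRequiredParameter(name="Minimizer", default="LBFGSB",
+                typecast=str, listval=["LBFGSB","TNC"])
+            self.setParameters({"Minimizer":"TNC"})
+            # self._parameters["Minimizer"] is then "TNC", checked against listval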
+ """ + self._parameters.update( fromDico ) + for k in self.__required_parameters.keys(): + if k in fromDico.keys(): + self._parameters[k] = self.setParameterValue(k,fromDico[k]) + else: + self._parameters[k] = self.setParameterValue(k) + logging.debug("%s %s : %s"%(self._name, self.__required_parameters[k]["message"], self._parameters[k])) + # ============================================================================== class Diagnostic: """ diff --git a/src/daComposant/daCore/Persistence.py b/src/daComposant/daCore/Persistence.py index 695bc3b..eed3626 100644 --- a/src/daComposant/daCore/Persistence.py +++ b/src/daComposant/daCore/Persistence.py @@ -312,20 +312,22 @@ class Persistence: return pairs # --------------------------------------------------------- - def mean(self): + def means(self): """ - Renvoie la valeur moyenne des données à chaque pas. Il faut que le type - de base soit compatible avec les types élémentaires numpy. + Renvoie la série, contenant à chaque pas, la valeur moyenne des données + au pas. Il faut que le type de base soit compatible avec les types + élémentaires numpy. """ try: return [numpy.matrix(item).mean() for item in self.__values] except: raise TypeError("Base type is incompatible with numpy") - def std(self, ddof=0): + def stds(self, ddof=0): """ - Renvoie l'écart-type des données à chaque pas. Il faut que le type de - base soit compatible avec les types élémentaires numpy. + Renvoie la série, contenant à chaque pas, l'écart-type des données + au pas. Il faut que le type de base soit compatible avec les types + élémentaires numpy. ddof : c'est le nombre de degrés de liberté pour le calcul de l'écart-type, qui est dans le diviseur. Inutile avant Numpy 1.1 @@ -338,30 +340,33 @@ class Persistence: except: raise TypeError("Base type is incompatible with numpy") - def sum(self): + def sums(self): """ - Renvoie la somme des données à chaque pas. Il faut que le type de - base soit compatible avec les types élémentaires numpy. + Renvoie la série, contenant à chaque pas, la somme des données au pas. + Il faut que le type de base soit compatible avec les types élémentaires + numpy. """ try: return [numpy.matrix(item).sum() for item in self.__values] except: raise TypeError("Base type is incompatible with numpy") - def min(self): + def mins(self): """ - Renvoie le minimum des données à chaque pas. Il faut que le type de - base soit compatible avec les types élémentaires numpy. + Renvoie la série, contenant à chaque pas, le minimum des données au pas. + Il faut que le type de base soit compatible avec les types élémentaires + numpy. """ try: return [numpy.matrix(item).min() for item in self.__values] except: raise TypeError("Base type is incompatible with numpy") - def max(self): + def maxs(self): """ - Renvoie le maximum des données à chaque pas. Il faut que le type de - base soit compatible avec les types élémentaires numpy. + Renvoie la série, contenant à chaque pas, la maximum des données au pas. + Il faut que le type de base soit compatible avec les types élémentaires + numpy. """ try: return [numpy.matrix(item).max() for item in self.__values] @@ -501,18 +506,21 @@ class Persistence: raw_input('Please press return to continue...\n') # --------------------------------------------------------- - def stepmean(self): + def mean(self): """ Renvoie la moyenne sur toutes les valeurs sans tenir compte de la longueur des pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. 
""" try: - return numpy.matrix(self.__values).mean() + if self.__basetype in [int, float]: + return float( numpy.array(self.__values).mean() ) + else: + return numpy.array(self.__values).mean(axis=0) except: raise TypeError("Base type is incompatible with numpy") - def stepstd(self, ddof=0): + def std(self, ddof=0): """ Renvoie l'écart-type de toutes les valeurs sans tenir compte de la longueur des pas. Il faut que le type de base soit compatible avec @@ -523,42 +531,42 @@ class Persistence: """ try: if numpy.version.version >= '1.1.0': - return numpy.matrix(self.__values).std(ddof=ddof) + return numpy.array(self.__values).std(ddof=ddof,axis=0) else: - return numpy.matrix(self.__values).std() + return numpy.array(self.__values).std(axis=0) except: raise TypeError("Base type is incompatible with numpy") - def stepsum(self): + def sum(self): """ Renvoie la somme de toutes les valeurs sans tenir compte de la longueur des pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ try: - return numpy.matrix(self.__values).sum() + return numpy.array(self.__values).sum(axis=0) except: raise TypeError("Base type is incompatible with numpy") - def stepmin(self): + def min(self): """ Renvoie le minimum de toutes les valeurs sans tenir compte de la longueur des pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ try: - return numpy.matrix(self.__values).min() + return numpy.array(self.__values).min(axis=0) except: raise TypeError("Base type is incompatible with numpy") - def stepmax(self): + def max(self): """ Renvoie le maximum de toutes les valeurs sans tenir compte de la longueur des pas. Il faut que le type de base soit compatible avec les types élémentaires numpy. """ try: - return numpy.matrix(self.__values).max() + return numpy.array(self.__values).max(axis=0) except: raise TypeError("Base type is incompatible with numpy") @@ -569,7 +577,7 @@ class Persistence: les types élémentaires numpy. 
""" try: - return numpy.matrix(self.__values).cumsum(axis=0) + return numpy.array(self.__values).cumsum(axis=0) except: raise TypeError("Base type is incompatible with numpy") @@ -957,17 +965,17 @@ if __name__ == "__main__": print "La 2ème valeur :", OBJET_DE_TEST.valueserie(1) print "La dernière valeur :", OBJET_DE_TEST.valueserie(-1) print "Valeurs par pas :" + print " La moyenne :", OBJET_DE_TEST.means() + print " L'écart-type :", OBJET_DE_TEST.stds() + print " La somme :", OBJET_DE_TEST.sums() + print " Le minimum :", OBJET_DE_TEST.mins() + print " Le maximum :", OBJET_DE_TEST.maxs() + print "Valeurs globales :" print " La moyenne :", OBJET_DE_TEST.mean() print " L'écart-type :", OBJET_DE_TEST.std() print " La somme :", OBJET_DE_TEST.sum() print " Le minimum :", OBJET_DE_TEST.min() print " Le maximum :", OBJET_DE_TEST.max() - print "Valeurs globales :" - print " La moyenne :", OBJET_DE_TEST.stepmean() - print " L'écart-type :", OBJET_DE_TEST.stepstd() - print " La somme :", OBJET_DE_TEST.stepsum() - print " Le minimum :", OBJET_DE_TEST.stepmin() - print " Le maximum :", OBJET_DE_TEST.stepmax() print " La somme cumulée :", OBJET_DE_TEST.cumsum() print "Taille \"shape\" :", OBJET_DE_TEST.shape() print "Taille \"len\" :", len(OBJET_DE_TEST) @@ -1004,17 +1012,17 @@ if __name__ == "__main__": print "La 2ème valeur :", OBJET_DE_TEST.valueserie(1) print "La dernière valeur :", OBJET_DE_TEST.valueserie(-1) print "Valeurs par pas :" + print " La moyenne :", OBJET_DE_TEST.means() + print " L'écart-type :", OBJET_DE_TEST.stds() + print " La somme :", OBJET_DE_TEST.sums() + print " Le minimum :", OBJET_DE_TEST.mins() + print " Le maximum :", OBJET_DE_TEST.maxs() + print "Valeurs globales :" print " La moyenne :", OBJET_DE_TEST.mean() print " L'écart-type :", OBJET_DE_TEST.std() print " La somme :", OBJET_DE_TEST.sum() print " Le minimum :", OBJET_DE_TEST.min() print " Le maximum :", OBJET_DE_TEST.max() - print "Valeurs globales :" - print " La moyenne :", OBJET_DE_TEST.stepmean() - print " L'écart-type :", OBJET_DE_TEST.stepstd() - print " La somme :", OBJET_DE_TEST.stepsum() - print " Le minimum :", OBJET_DE_TEST.stepmin() - print " Le maximum :", OBJET_DE_TEST.stepmax() print " La somme cumulée :", OBJET_DE_TEST.cumsum() print "Taille \"shape\" :", OBJET_DE_TEST.shape() print "Taille \"len\" :", len(OBJET_DE_TEST) @@ -1045,17 +1053,17 @@ if __name__ == "__main__": print "La 2ème valeur :", OBJET_DE_TEST.valueserie(1) print "La dernière valeur :", OBJET_DE_TEST.valueserie(-1) print "Valeurs par pas :" + print " La moyenne :", OBJET_DE_TEST.means() + print " L'écart-type :", OBJET_DE_TEST.stds() + print " La somme :", OBJET_DE_TEST.sums() + print " Le minimum :", OBJET_DE_TEST.mins() + print " Le maximum :", OBJET_DE_TEST.maxs() + print "Valeurs globales :" print " La moyenne :", OBJET_DE_TEST.mean() print " L'écart-type :", OBJET_DE_TEST.std() print " La somme :", OBJET_DE_TEST.sum() print " Le minimum :", OBJET_DE_TEST.min() print " Le maximum :", OBJET_DE_TEST.max() - print "Valeurs globales :" - print " La moyenne :", OBJET_DE_TEST.stepmean() - print " L'écart-type :", OBJET_DE_TEST.stepstd() - print " La somme :", OBJET_DE_TEST.stepsum() - print " Le minimum :", OBJET_DE_TEST.stepmin() - print " Le maximum :", OBJET_DE_TEST.stepmax() print " La somme cumulée :", OBJET_DE_TEST.cumsum() print "Taille \"shape\" :", OBJET_DE_TEST.shape() print "Taille \"len\" :", len(OBJET_DE_TEST) @@ -1072,17 +1080,17 @@ if __name__ == "__main__": print "La 2ème valeur :", OBJET_DE_TEST.valueserie(1) print "La 
dernière valeur :", OBJET_DE_TEST.valueserie(-1) print "Valeurs par pas : attention, on peut les calculer car True=1, False=0, mais cela n'a pas de sens" + print " La moyenne :", OBJET_DE_TEST.means() + print " L'écart-type :", OBJET_DE_TEST.stds() + print " La somme :", OBJET_DE_TEST.sums() + print " Le minimum :", OBJET_DE_TEST.mins() + print " Le maximum :", OBJET_DE_TEST.maxs() + print "Valeurs globales : attention, on peut les calculer car True=1, False=0, mais cela n'a pas de sens" print " La moyenne :", OBJET_DE_TEST.mean() print " L'écart-type :", OBJET_DE_TEST.std() print " La somme :", OBJET_DE_TEST.sum() print " Le minimum :", OBJET_DE_TEST.min() print " Le maximum :", OBJET_DE_TEST.max() - print "Valeurs globales : attention, on peut les calculer car True=1, False=0, mais cela n'a pas de sens" - print " La moyenne :", OBJET_DE_TEST.stepmean() - print " L'écart-type :", OBJET_DE_TEST.stepstd() - print " La somme :", OBJET_DE_TEST.stepsum() - print " Le minimum :", OBJET_DE_TEST.stepmin() - print " Le maximum :", OBJET_DE_TEST.stepmax() print " La somme cumulée :", OBJET_DE_TEST.cumsum() print "Taille \"shape\" :", OBJET_DE_TEST.shape() print "Taille \"len\" :", len(OBJET_DE_TEST) diff --git a/src/daComposant/daDiagnostics/RMS.py b/src/daComposant/daDiagnostics/RMS.py index bbfc4d4..6a27f30 100644 --- a/src/daComposant/daDiagnostics/RMS.py +++ b/src/daComposant/daDiagnostics/RMS.py @@ -82,6 +82,6 @@ if __name__ == "__main__": D.calculate(vect1,vect2) print " Les valeurs de RMS attendues sont les suivantes : [1.0, 1.0, 1.0, 3.0, 0.53162016515553656, 0.73784217096601323]" print " Les RMS obtenues................................:", D.valueserie() - print " La moyenne......................................:", D.stepmean() + print " La moyenne......................................:", D.mean() print -- 2.39.2
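The heart of the change is the trio defineRequiredParameter / setParameterValue / setParameters added to BasicObjects.Algorithm above. A condensed, self-contained sketch of that declare-then-validate pattern, runnable on its own under Python 2 (MiniAlgorithm is an illustrative mock, not the real daCore class):

    class MiniAlgorithm:
        def __init__(self, name):
            self._name       = str(name)
            self._parameters = {}
            self._required   = {}
        def defineRequiredParameter(self, name, default=None, typecast=None,
                                    message=None, minval=None, maxval=None, listval=None):
            # Each tunable is declared once, with its default and its checks.
            self._required[name] = (default, typecast, minval, maxval, listval)
        def setParameterValue(self, name, value=None):
            # Default, typecast, then bounds and list membership checks.
            default, typecast, minval, maxval, listval = self._required[name]
            val = default if value is None else value
            if typecast is not None and val is not None:
                val = typecast( val )
            if minval is not None and val < minval:
                raise ValueError("\"%s\" can not be less than %s"%(name, minval))
            if maxval is not None and val > maxval:
                raise ValueError("\"%s\" can not be greater than %s"%(name, maxval))
            if listval is not None and val not in listval:
                raise ValueError("\"%s\" has to be in %s"%(name, listval))
            return val
        def setParameters(self, fromDico={}):
            # Undeclared keys (e.g. "Bounds") pass through untouched; declared
            # keys are defaulted and validated.
            self._parameters.update( fromDico )
            for k in self._required.keys():
                self._parameters[k] = self.setParameterValue(k, fromDico.get(k))

    algo = MiniAlgorithm("3DVAR")
    algo.defineRequiredParameter(name="Minimizer", default="LBFGSB", typecast=str,
                                 listval=["LBFGSB","TNC", "CG", "NCG", "BFGS"])
    algo.defineRequiredParameter(name="MaximumNumberOfSteps", default=15000,
                                 typecast=int, minval=-1)
    algo.setParameters({"MaximumNumberOfSteps" : 100})
    print algo._parameters

With this pattern a bad value fails loudly at setParameters time, for example {"Minimizer" : "SIMPLEX"} raises a ValueError against listval, where the removed has_key cascades silently fell back to the default after a warning.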