From 6698df10c9d936ae980e975bf969c91952b0cc18 Mon Sep 17 00:00:00 2001 From: Jean-Philippe ARGAUD Date: Fri, 15 Apr 2022 17:00:33 +0200 Subject: [PATCH] Code improvements, review and simplifications (4) Static and dynamic checking --- src/daComposant/daAlgorithms/Atoms/ecwnlls.py | 2 +- src/daComposant/daAlgorithms/Atoms/etkf.py | 3 +- .../daAlgorithms/Atoms/incr3dvar.py | 2 +- src/daComposant/daAlgorithms/Atoms/mlef.py | 3 +- .../daAlgorithms/Atoms/psas3dvar.py | 2 +- src/daComposant/daAlgorithms/Atoms/senkf.py | 3 +- .../daAlgorithms/Atoms/std3dvar.py | 2 +- .../daAlgorithms/Atoms/std4dvar.py | 2 +- .../daAlgorithms/Atoms/van3dvar.py | 2 +- .../daAlgorithms/EnsembleKalmanFilter.py | 16 +- src/daComposant/daCore/Aidsm.py | 1 + src/daComposant/daCore/AssimilationStudy.py | 1 + src/daComposant/daCore/BasicObjects.py | 221 +++++++++++++----- src/daComposant/daCore/ExtendedLogging.py | 1 + src/daComposant/daCore/Interfaces.py | 67 ++++-- src/daComposant/daCore/NumericObjects.py | 77 ++++-- src/daComposant/daCore/Persistence.py | 37 +-- src/daComposant/daCore/PlatformInfo.py | 20 +- src/daComposant/daCore/Reporting.py | 8 +- src/daSalome/daYacsSchemaCreator/run.py | 3 +- 20 files changed, 338 insertions(+), 135 deletions(-) diff --git a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py index 2282695..24bc8fe 100644 --- a/src/daComposant/daAlgorithms/Atoms/ecwnlls.py +++ b/src/daComposant/daAlgorithms/Atoms/ecwnlls.py @@ -132,7 +132,7 @@ def ecwnlls(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/Atoms/etkf.py b/src/daComposant/daAlgorithms/Atoms/etkf.py index 4cbcfa8..d75aa85 100644 --- a/src/daComposant/daAlgorithms/Atoms/etkf.py +++ b/src/daComposant/daAlgorithms/Atoms/etkf.py @@ -298,8 +298,7 @@ def etkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, ) # if Hybrid == "E3DVAR": - betaf = selfA._parameters["HybridCovarianceEquilibrium"] - Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf) + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, selfA._parameters) # Xa = EnsembleMean( Xn ) #-------------------------- diff --git a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py index 27765bf..0c031c1 100644 --- a/src/daComposant/daAlgorithms/Atoms/incr3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/incr3dvar.py @@ -134,7 +134,7 @@ def incr3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): # if selfA._parameters["Minimizer"] == "LBFGSB": # Minimum, J_optimal, Informations = scipy.optimize.fmin_l_bfgs_b( - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/Atoms/mlef.py b/src/daComposant/daAlgorithms/Atoms/mlef.py index 945325b..1135a23 100644 --- a/src/daComposant/daAlgorithms/Atoms/mlef.py +++ b/src/daComposant/daAlgorithms/Atoms/mlef.py @@ -183,8 +183,7 @@ def mlef(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, ) # if Hybrid == "E3DVAR": - betaf = selfA._parameters["HybridCovarianceEquilibrium"] - Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, 
betaf) + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, selfA._parameters) # Xa = EnsembleMean( Xn ) #-------------------------- diff --git a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py index ec136bd..23c9d4a 100644 --- a/src/daComposant/daAlgorithms/Atoms/psas3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/psas3dvar.py @@ -114,7 +114,7 @@ def psas3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/Atoms/senkf.py b/src/daComposant/daAlgorithms/Atoms/senkf.py index fee7075..2d0ae5f 100644 --- a/src/daComposant/daAlgorithms/Atoms/senkf.py +++ b/src/daComposant/daAlgorithms/Atoms/senkf.py @@ -181,8 +181,7 @@ def senkf(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, ) # if Hybrid == "E3DVAR": - betaf = selfA._parameters["HybridCovarianceEquilibrium"] - Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, betaf) + Xn = Apply3DVarRecentringOnEnsemble(Xn, EMX, Ynpu, HO, R, B, selfA._parameters) # Xa = EnsembleMean( Xn ) #-------------------------- diff --git a/src/daComposant/daAlgorithms/Atoms/std3dvar.py b/src/daComposant/daAlgorithms/Atoms/std3dvar.py index c055f77..c752c4c 100644 --- a/src/daComposant/daAlgorithms/Atoms/std3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std3dvar.py @@ -117,7 +117,7 @@ def std3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/Atoms/std4dvar.py b/src/daComposant/daAlgorithms/Atoms/std4dvar.py index b16b1cd..0ea8013 100644 --- a/src/daComposant/daAlgorithms/Atoms/std4dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/std4dvar.py @@ -178,7 +178,7 @@ def std4dvar(selfA, Xb, Y, U, HO, EM, CM, R, B, Q): nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/Atoms/van3dvar.py b/src/daComposant/daAlgorithms/Atoms/van3dvar.py index d095d3f..d32abdb 100644 --- a/src/daComposant/daAlgorithms/Atoms/van3dvar.py +++ b/src/daComposant/daAlgorithms/Atoms/van3dvar.py @@ -126,7 +126,7 @@ def van3dvar(selfA, Xb, Y, U, HO, CM, R, B, __storeState = False): nbPreviousSteps = selfA.StoredVariables["CostFunctionJ"].stepnumber() # if selfA._parameters["Minimizer"] == "LBFGSB": - if "0.19" <= scipy.version.version <= "1.1.0": + if "0.19" <= scipy.version.version <= "1.4.1": import daAlgorithms.Atoms.lbfgsbhlt as optimiseur else: import scipy.optimize as optimiseur diff --git a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py index 19be017..26e7ddc 100644 --- a/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py +++ 
b/src/daComposant/daAlgorithms/EnsembleKalmanFilter.py @@ -112,10 +112,24 @@ class ElementaryAlgorithm(BasicObjects.Algorithm): name = "HybridCovarianceEquilibrium", default = 0.5, typecast = float, - message = "Facteur d'équilibre entre la covariance statique et la covariance d'ensemble", + message = "Facteur d'équilibre entre la covariance statique et la covariance d'ensemble en hybride variationnel", minval = 0., maxval = 1., ) + self.defineRequiredParameter( + name = "HybridMaximumNumberOfIterations", + default = 15000, + typecast = int, + message = "Nombre maximal de pas d'optimisation en hybride variationnel", + minval = -1, + ) + self.defineRequiredParameter( + name = "HybridCostDecrementTolerance", + default = 1.e-7, + typecast = float, + message = "Diminution relative minimale du coût lors de l'arrêt en hybride variationnel", + minval = 0., + ) self.defineRequiredParameter( name = "SetSeed", typecast = numpy.random.seed, diff --git a/src/daComposant/daCore/Aidsm.py b/src/daComposant/daCore/Aidsm.py index 31e18e3..99e01ef 100644 --- a/src/daComposant/daCore/Aidsm.py +++ b/src/daComposant/daCore/Aidsm.py @@ -864,6 +864,7 @@ class Aidsm(object): else: return self.__StoredInputs + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/AssimilationStudy.py b/src/daComposant/daCore/AssimilationStudy.py index 2d28e14..4af1e0a 100644 --- a/src/daComposant/daCore/AssimilationStudy.py +++ b/src/daComposant/daCore/AssimilationStudy.py @@ -36,6 +36,7 @@ class AssimilationStudy(_Aidsm): def __init__(self, name = ""): _Aidsm.__init__(self, name) + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index a219937..d5b6952 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -247,7 +247,11 @@ class Operator(object): else: _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF if not hasattr(_hserie, "pop"): - raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.") + raise TypeError( + "The user input multi-function doesn't seem to return a"+\ + " result sequence, behaving like a mono-function. It has"+\ + " to be checked." + ) for i in _hindex: _xv = _xserie.pop(0) _hv = _hserie.pop(0) @@ -496,19 +500,16 @@ class FullOperator(object): __Function = asThreeFunctions __Function.update({"useApproximatedDerivatives":True}) else: - raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")") + raise ValueError( + "The functions have to be given in a dictionary which has either"+\ + " 1 key (\"Direct\") or"+\ + " 3 keys (\"Direct\" (optional), \"Tangent\" and \"Adjoint\")") if "Direct" not in asThreeFunctions: __Function["Direct"] = asThreeFunctions["Tangent"] __Function.update(__Parameters) else: __Function = None # - # if sys.version_info[0] < 3 and isinstance(__Function, dict): - #  for k in ("Direct", "Tangent", "Adjoint"): - #  if k in __Function and hasattr(__Function[k],"__class__"): - #  if type(__Function[k]) is type(self.__init__): - #  raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. 
Use a real function instead."%(type(__Function[k]),k)) - # if appliedInX is not None and isinstance(appliedInX, dict): __appliedInX = appliedInX elif appliedInX is not None: @@ -547,25 +548,83 @@ class FullOperator(object): mpWorkers = __Function["NumberOfProcesses"], mfEnabled = __Function["withmfEnabled"], ) - self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) - self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) + self.__FO["Direct"] = Operator( + name = self.__name, + fromMethod = FDA.DirectOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMethod = FDA.TangentOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMethod = FDA.AdjointOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) elif isinstance(__Function, dict) and \ ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \ (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None): - self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) - self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) + self.__FO["Direct"] = Operator( + name = self.__name, + fromMethod = __Function["Direct"], + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMethod = __Function["Tangent"], + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMethod = __Function["Adjoint"], + reducingMemoryUse = 
__reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) elif asMatrix is not None: if isinstance(__Matrix, str): __Matrix = PlatformInfo.strmatrix2liststr( __Matrix ) __matrice = numpy.asarray( __Matrix, dtype=float ) - self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF ) - self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF ) + self.__FO["Direct"] = Operator( + name = self.__name, + fromMatrix = __matrice, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMatrix = __matrice, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMatrix = __matrice.T, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF ) del __matrice else: - raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name) + raise ValueError( + "The %s object is improperly defined or undefined,"%self.__name+\ + " it requires at a minimum either a matrix, a Direct operator for"+\ + " approximate derivatives or a Tangent/Adjoint operators pair."+\ + " Please check your operator input.") # if __appliedInX is not None: self.__FO["AppliedInX"] = {} @@ -729,7 +788,7 @@ class Algorithm(object): self.__setParameters(Parameters, reset=True) for k, v in self.__variable_names_not_public.items(): if k not in self._parameters: self.__setParameters( {k:v} ) - # + # Corrections et compléments des vecteurs def __test_vvalue(argument, variable, argname, symbol=None): if symbol is None: symbol = variable @@ -746,12 +805,14 @@ class Algorithm(object): elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) else: - logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) + logging.debug( + "%s %s vector %s is set although neither required nor optional, and its size is %i."%( + self._name,argname,symbol,numpy.array(argument).size)) return 0 __test_vvalue( Xb, "Xb", "Background or initial state" ) __test_vvalue( Y, "Y", "Observation" ) __test_vvalue( U, "U", "Control" ) - # + # Corrections et compléments des covariances def __test_cvalue(argument, variable, argname, symbol=None): if symbol is None: symbol = variable @@ -773,7 +834,7 @@ class Algorithm(object): __test_cvalue( B, "B", "Background" ) __test_cvalue( R, "R", "Observation" ) __test_cvalue( Q, "Q", "Evolution" ) - # + # Corrections et 
compléments des opérateurs def __test_ovalue(argument, variable, argname, symbol=None): if symbol is None: symbol = variable @@ -801,7 +862,9 @@ class Algorithm(object): logging.debug("%s Bounds taken into account"%(self._name,)) else: self._parameters["Bounds"] = None - if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0): + if ("StateBoundsForQuantiles" in self._parameters) \ + and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \ + and (len(self._parameters["StateBoundsForQuantiles"]) > 0): logging.debug("%s Bounds for quantiles states taken into account"%(self._name,)) # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes # @@ -847,8 +910,12 @@ class Algorithm(object): _C = numpy.dot(_EI, numpy.dot(_A, _EI)) self.StoredVariables["APosterioriCorrelations"].store( _C ) if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH: - logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0)) - logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3)) + logging.debug( + "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", + self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0)) + logging.debug( + "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", + self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3)) logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio")) logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1]) logging.debug("%s Terminé", self._name) @@ -892,12 +959,12 @@ class Algorithm(object): else: try: msg = "'%s'"%k - except: + except Exception: raise TypeError("pop expected at least 1 arguments, got 0") "If key is not found, d is returned if given, otherwise KeyError is raised" try: return d - except: + except Exception: raise KeyError(msg) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): @@ -907,7 +974,17 @@ class Algorithm(object): """ raise NotImplementedError("Mathematical assimilation calculation has not been implemented!") - def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None, oldname = None): + def defineRequiredParameter(self, + name = None, + default = None, + typecast = None, + message = None, + minval = None, + maxval = None, + listval = None, + listadv = None, + oldname = None, + ): """ Permet de définir dans l'algorithme des paramètres requis et leurs caractéristiques par défaut. 
@@ -963,7 +1040,7 @@ class Algorithm(object): else: try: __val = typecast( value ) - except: + except Exception: raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast)) # if minval is not None and (numpy.array(__val, float) < minval).any(): @@ -1018,7 +1095,8 @@ class Algorithm(object): for k in __inverse_fromDico_keys.values(): if k.lower() in self.__replace_by_the_new_name: __newk = self.__replace_by_the_new_name[k.lower()] - __msg = "the parameter '%s' used in '%s' algorithm case is deprecated and has to be replaced by '%s'. Please update your code."%(k,self._name,__newk) + __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk) + __msg += " Please update your code." warnings.warn(__msg, FutureWarning, stacklevel=50) # for k in self.__required_parameters.keys(): @@ -1232,7 +1310,7 @@ class AlgorithmAndParameters(object): try: catalogAd = r.loadCatalog("proc", __file) r.addCatalog(catalogAd) - except: + except Exception: pass try: @@ -1362,7 +1440,8 @@ class AlgorithmAndParameters(object): if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')): module_path = os.path.abspath(os.path.join(directory, daDirectory)) if module_path is None: - raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path)) + raise ImportError( + "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path)) # # Importe le fichier complet comme un module # ------------------------------------------ @@ -1374,7 +1453,8 @@ class AlgorithmAndParameters(object): self.__algorithmName = str(choice) sys.path = sys_path_tmp ; del sys_path_tmp except ImportError as e: - raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e)) + raise ImportError( + "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e)) # # Instancie un objet du type élémentaire du fichier # ------------------------------------------------- @@ -1463,13 +1543,21 @@ class AlgorithmAndParameters(object): raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,)) # if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape)) + raise ValueError( + "Shape characteristic of 
observation operator (H)"+\ + " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape)) # if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ): if self.__algorithmName in ["EnsembleBlue",]: @@ -1479,16 +1567,24 @@ class AlgorithmAndParameters(object): self.__Xb.store( numpy.asarray(member, dtype=float) ) __Xb_shape = min(__B_shape) else: - raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of a priori errors covariance matrix (B)"+\ + " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape)) # if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ): - raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape)) + raise ValueError( + "Shape characteristic of observation errors covariance matrix (R)"+\ + " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape)) # if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ): - raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of evolution model (EM)"+\ + " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape)) # if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ): - raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape)) + raise ValueError( + "Shape characteristic of control model (CM)"+\ + " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape)) # if ("Bounds" in self.__P) \ and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \ @@ -1789,7 +1885,10 @@ class State(object): self.shape = (self.shape[0],1) self.size = self.shape[0] * self.shape[1] else: - raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name) + raise ValueError( + "The %s object is improperly defined or undefined,"%self.__name+\ + " it requires at a minimum either a vector, a list/tuple of"+\ + " vectors or a persistent object. Please check your vector input.") # if scheduledBy is not None: self.__T = scheduledBy @@ -1876,7 +1975,10 @@ class Covariance(object): __Scalar = PlatformInfo.strvect2liststr( __Scalar ) if len(__Scalar) > 0: __Scalar = __Scalar[0] if numpy.array(__Scalar).size != 1: - raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. 
Please check your scalar input.'%numpy.array(__Scalar).size) + raise ValueError( + " The diagonal multiplier given to define a sparse matrix is"+\ + " not a unique scalar value.\n Its actual measured size is"+\ + " %i. Please check your scalar input."%numpy.array(__Scalar).size) self.__is_scalar = True self.__C = numpy.abs( float(__Scalar) ) self.shape = (0,0) @@ -1909,7 +2011,6 @@ class Covariance(object): self.size = 0 else: pass - # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name) # self.__validate() @@ -1928,12 +2029,12 @@ class Covariance(object): if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING): try: numpy.linalg.cholesky( self.__C ) - except: + except Exception: raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,)) if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING): try: self.__C.cholesky() - except: + except Exception: raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,)) def isscalar(self): @@ -2168,14 +2269,16 @@ class Covariance(object): elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return self.__C * numpy.asmatrix(other) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name)) elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): if numpy.ravel(other).size == self.shape[1]: # Vecteur return numpy.asmatrix(self.__C * numpy.ravel(other)).T elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose()) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) elif self.isscalar() and isinstance(other,numpy.matrix): return self.__C * other elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)): @@ -2186,7 +2289,8 @@ class Covariance(object): elif self.isobject(): return self.__C.__mul__(other) else: - raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other))) + raise NotImplementedError( + "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other))) def __rmatmul__(self, other): "x.__rmul__(y) <==> y@x" @@ -2198,20 +2302,23 @@ class Covariance(object): elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return numpy.asmatrix(other) * self.__C else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) elif self.isvector() and isinstance(other,numpy.matrix): if numpy.ravel(other).size == self.shape[0]: # Vecteur return numpy.asmatrix(numpy.ravel(other) * self.__C) elif 
numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice return numpy.asmatrix(numpy.array(other) * self.__C) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) elif self.isscalar() and isinstance(other,numpy.matrix): return other * self.__C elif self.isobject(): return self.__C.__rmatmul__(other) else: - raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other))) + raise NotImplementedError( + "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other))) def __rmul__(self, other): "x.__rmul__(y) <==> y*x" @@ -2223,14 +2330,16 @@ class Covariance(object): elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return numpy.asmatrix(other) * self.__C else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) elif self.isvector() and isinstance(other,numpy.matrix): if numpy.ravel(other).size == self.shape[0]: # Vecteur return numpy.asmatrix(numpy.ravel(other) * self.__C) elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice return numpy.asmatrix(numpy.array(other) * self.__C) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) elif self.isscalar() and isinstance(other,numpy.matrix): return other * self.__C elif self.isscalar() and isinstance(other,float): @@ -2238,7 +2347,8 @@ class Covariance(object): elif self.isobject(): return self.__C.__rmul__(other) else: - raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other))) + raise NotImplementedError( + "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other))) def __len__(self): "x.__len__() <==> len(x)" @@ -2369,6 +2479,7 @@ def MultiFonction( # logging.debug("MULTF Internal multifonction calculations end") return __multiHX + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/ExtendedLogging.py b/src/daComposant/daCore/ExtendedLogging.py index 2da4d7e..c9e31e7 100644 --- a/src/daComposant/daCore/ExtendedLogging.py +++ b/src/daComposant/daCore/ExtendedLogging.py @@ -158,6 +158,7 @@ def logtimer(f): return result return wrapper + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/Interfaces.py b/src/daComposant/daCore/Interfaces.py index d0f2039..175b9b6 100644 --- a/src/daComposant/daCore/Interfaces.py +++ b/src/daComposant/daCore/Interfaces.py @@ -75,8 +75,8 @@ class GenericCaseViewer(object): def _finalize(self, __upa=None): "Enregistrement du final" __hasNotExecute = True - for l in self._lineSerie: - if "%s.execute"%(self._objname,) in l: __hasNotExecute = False + for __l 
in self._lineSerie: + if "%s.execute"%(self._objname,) in __l: __hasNotExecute = False if __hasNotExecute: self._lineSerie.append("%s.execute()"%(self._objname,)) if __upa is not None and len(__upa)>0: @@ -253,7 +253,7 @@ class _COMViewer(GenericCaseViewer): elif 'SCRIPT_FILE' in r and os.path.exists(r['SCRIPT_FILE']): __UserPostAnalysis = open(r['SCRIPT_FILE'],'r').read() __commands.append( "set( Concept='UserPostAnalysis', Script='%s' )"%(r['SCRIPT_FILE'],) ) - elif 'Template' in r and not 'ValueTemplate' in r: + elif 'Template' in r and 'ValueTemplate' not in r: # AnalysisPrinter... if r['Template'] not in Templates.UserPostAnalysisTemplates: raise ValueError("User post-analysis template \"%s\" does not exist."%(r['Template'],)) @@ -453,8 +453,10 @@ class _SCDViewer(GenericCaseViewer): __local.pop(__k) for __k,__v in __local.items(): if __k == "Concept": continue - if __k in ['ScalarSparseMatrix','DiagonalSparseMatrix','Matrix','OneFunction','ThreeFunctions'] and 'Script' in __local and __local['Script'] is not None: continue - if __k in ['Vector','VectorSerie'] and 'DataFile' in __local and __local['DataFile'] is not None: continue + if __k in ['ScalarSparseMatrix','DiagonalSparseMatrix','Matrix','OneFunction','ThreeFunctions'] \ + and 'Script' in __local and __local['Script'] is not None: continue + if __k in ['Vector','VectorSerie'] \ + and 'DataFile' in __local and __local['DataFile'] is not None: continue if __k == 'Parameters' and not (__command in ['AlgorithmParameters','SupplementaryParameters']): continue if __k == 'Algorithm': __text += "study_config['Algorithm'] = %s\n"%(repr(__v)) @@ -709,7 +711,9 @@ class ImportFromScript(object): if __filename is None: raise ValueError("The name of the file, containing the variable to be read, has to be specified.") if not os.path.isfile(__filename): - raise ValueError("The file containing the variable to be imported doesn't seem to exist. Please check the file. The given file name is:\n \"%s\""%str(__filename)) + raise ValueError( + "The file containing the variable to be imported doesn't seem to"+\ + " exist. Please check the file. The given file name is:\n \"%s\""%str(__filename)) if os.path.dirname(__filename) != '': sys.path.insert(0, os.path.dirname(__filename)) __basename = os.path.basename(__filename).rstrip(".py") @@ -729,9 +733,15 @@ class ImportFromScript(object): raise ValueError("The name of the variable to be read has to be specified. Please check the content of the file and the syntax.") if not hasattr(self.__filenspace, __varname): if __synonym is None: - raise ValueError("The imported script file \"%s\" doesn't contain the mandatory variable \"%s\" to be read. Please check the content of the file and the syntax."%(str(self.__basename)+".py",__varname)) + raise ValueError( + "The imported script file \"%s\""%(str(self.__basename)+".py",)+\ + " doesn't contain the mandatory variable \"%s\""%(__varname,)+\ + " to be read. Please check the content of the file and the syntax.") elif not hasattr(self.__filenspace, __synonym): - raise ValueError("The imported script file \"%s\" doesn't contain the mandatory variable \"%s\" to be read. Please check the content of the file and the syntax."%(str(self.__basename)+".py",__synonym)) + raise ValueError( + "The imported script file \"%s\""%(str(self.__basename)+".py",)+\ + " doesn't contain the mandatory variable \"%s\""%(__synonym,)+\ + " to be read. 
Please check the content of the file and the syntax.") else: return getattr(self.__filenspace, __synonym) else: @@ -747,8 +757,10 @@ class ImportDetector(object): """ __slots__ = ( "__url", "__usr", "__root", "__end") - def __enter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): return False + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + return False # def __init__(self, __url, UserMime=""): if __url is None: @@ -844,8 +856,10 @@ class ImportFromFile(object): "_filename", "_colnames", "_colindex", "_varsline", "_format", "_delimiter", "_skiprows", "__url", "__filestring", "__header", "__allowvoid", "__binaryformats", "__supportedformats") - def __enter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): return False + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + return False # def __init__(self, Filename=None, ColNames=None, ColIndex=None, Format="Guess", AllowVoidNameList=True): """ @@ -1029,7 +1043,7 @@ class ImportFromFile(object): else: raise ValueError("Unkown file format \"%s\" or no reader available"%self._format) if __columns is None: __columns = () - # + def toString(value): try: return value.decode() @@ -1061,8 +1075,10 @@ class ImportScalarLinesFromFile(ImportFromFile): Seule la méthode "getvalue" est changée. """ - def __enter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): return False + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + return False # def __init__(self, Filename=None, ColNames=None, ColIndex=None, Format="Guess"): ImportFromFile.__init__(self, Filename, ColNames, ColIndex, Format) @@ -1079,9 +1095,11 @@ class ImportScalarLinesFromFile(ImportFromFile): __dtypes = {'names' : ('Name', 'Value', 'Minimum', 'Maximum'), 'formats': ('S128', 'g', 'g', 'g')} __usecols = (0, 1, 2, 3) + def __replaceNoneN( s ): if s.strip() == b'None': return numpy.NINF else: return s + def __replaceNoneP( s ): if s.strip() == b'None': return numpy.PINF else: return s @@ -1097,6 +1115,7 @@ class ImportScalarLinesFromFile(ImportFromFile): __dtypes = {'names' : HeaderNames, 'formats': tuple(['S128',]+['g']*(len(HeaderNames)-1))} __usecols = tuple(range(len(HeaderNames))) + def __replaceNone( s ): if s.strip() == b'None': return numpy.NAN else: return s @@ -1107,9 +1126,22 @@ class ImportScalarLinesFromFile(ImportFromFile): raise ValueError("Can not find names of columns for initial values. 
Wrong first line is:\n \"%s\""%self._varsline) # if self._format == "text/plain": - __content = numpy.loadtxt(self._filename, dtype = __dtypes, usecols = __usecols, skiprows = self._skiprows, converters = __converters) + __content = numpy.loadtxt( + self._filename, + dtype = __dtypes, + usecols = __usecols, + skiprows = self._skiprows, + converters = __converters, + ) elif self._format in ["text/csv", "text/tab-separated-values"]: - __content = numpy.loadtxt(self._filename, dtype = __dtypes, usecols = __usecols, skiprows = self._skiprows, converters = __converters, delimiter = self._delimiter) + __content = numpy.loadtxt( + self._filename, + dtype = __dtypes, + usecols = __usecols, + skiprows = self._skiprows, + converters = __converters, + delimiter = self._delimiter, + ) else: raise ValueError("Unkown file format \"%s\""%self._format) # @@ -1224,6 +1256,7 @@ class EficasGUI(object): else: logging.debug("Can not launch standalone EFICAS/ADAO interface for path errors.") + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/NumericObjects.py b/src/daComposant/daCore/NumericObjects.py index 03e833d..dfeaab6 100644 --- a/src/daComposant/daCore/NumericObjects.py +++ b/src/daComposant/daCore/NumericObjects.py @@ -116,33 +116,48 @@ class FDApproximation(object): self.__userFunction__name = Function.__name__ try: mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename']) - except: + except Exception: mod = os.path.abspath(Function.__globals__['__file__']) if not os.path.isfile(mod): raise ImportError("No user defined function or method found with the name %s"%(mod,)) self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','') self.__userFunction__path = os.path.dirname(mod) del mod - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) + self.__userOperator = Operator( + name = self.__name, + fromMethod = Function, + avoidingRedundancy = self.__avoidRC, + inputAsMultiFunction = self.__mfEnabled, + extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct elif isinstance(Function,types.MethodType): logging.debug("FDA Calculs en multiprocessing : MethodType") self.__userFunction__name = Function.__name__ try: mod = os.path.join(Function.__globals__['filepath'],Function.__globals__['filename']) - except: + except Exception: mod = os.path.abspath(Function.__func__.__globals__['__file__']) if not os.path.isfile(mod): raise ImportError("No user defined function or method found with the name %s"%(mod,)) self.__userFunction__modl = os.path.basename(mod).replace('.pyc','').replace('.pyo','').replace('.py','') self.__userFunction__path = os.path.dirname(mod) del mod - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) + self.__userOperator = Operator( + name = self.__name, + fromMethod = Function, + avoidingRedundancy = self.__avoidRC, + inputAsMultiFunction = self.__mfEnabled, + extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # Pour le calcul Direct else: raise TypeError("User defined function or method has to be provided for finite differences 
approximation.") else: - self.__userOperator = Operator( name = self.__name, fromMethod = Function, avoidingRedundancy = self.__avoidRC, inputAsMultiFunction = self.__mfEnabled, extraArguments = self.__extraArgs ) + self.__userOperator = Operator( + name = self.__name, + fromMethod = Function, + avoidingRedundancy = self.__avoidRC, + inputAsMultiFunction = self.__mfEnabled, + extraArguments = self.__extraArgs ) self.__userFunction = self.__userOperator.appliedTo # self.__centeredDF = bool(centeredDF) @@ -156,12 +171,12 @@ class FDApproximation(object): self.__dX = numpy.ravel( dX ) # --------------------------------------------------------- - def __doublon__(self, e, l, n, v=None): + def __doublon__(self, __e, __l, __n, __v=None): __ac, __iac = False, -1 - for i in range(len(l)-1,-1,-1): - if numpy.linalg.norm(e - l[i]) < self.__tolerBP * n[i]: + for i in range(len(__l)-1,-1,-1): + if numpy.linalg.norm(__e - __l[i]) < self.__tolerBP * __n[i]: __ac, __iac = True, i - if v is not None: logging.debug("FDA Cas%s déja calculé, récupération du doublon %i"%(v,__iac)) + if __v is not None: logging.debug("FDA Cas%s déjà calculé, récupération du doublon %i"%(__v,__iac)) break return __ac, __iac @@ -313,7 +328,7 @@ class FDApproximation(object): _xserie.append( _X_moins_dXi ) # _HX_plusmoins_dX = self.DirectOperator( _xserie ) - # + # _Jacobienne = [] for i in range( len(_dX) ): _Jacobienne.append( numpy.ravel( _HX_plusmoins_dX[2*i] - _HX_plusmoins_dX[2*i+1] ) / (2.*_dX[i]) ) @@ -376,7 +391,7 @@ class FDApproximation(object): _Jacobienne = [] for i in range( len(_dX) ): _Jacobienne.append( numpy.ravel(( _HX_plus_dX[i] - _HX ) / _dX[i]) ) - # + # else: _Jacobienne = [] _HX = self.DirectOperator( _X ) @@ -736,14 +751,24 @@ def HessienneEstimation(__selfA, __nb, __HaM, __HtM, __BI, __RI): __A = __A + mpr*numpy.trace( __A ) * numpy.identity(__nb) # Positivité # if min(__A.shape) != max(__A.shape): - raise ValueError("The %s a posteriori covariance matrix A is of shape %s, despites it has to be a squared matrix. There is an error in the observation operator, please check it."%(__selfA._name,str(__A.shape))) + raise ValueError( + "The %s a posteriori covariance matrix A"%(__selfA._name,)+\ + " is of shape %s, despites it has to be a"%(str(__A.shape),)+\ + " squared matrix. There is an error in the observation operator,"+\ + " please check it.") if (numpy.diag(__A) < 0).any(): - raise ValueError("The %s a posteriori covariance matrix A has at least one negative value on its diagonal. There is an error in the observation operator, please check it."%(__selfA._name,)) + raise ValueError( + "The %s a posteriori covariance matrix A"%(__selfA._name,)+\ + " has at least one negative value on its diagonal. There is an"+\ + " error in the observation operator, please check it.") if logging.getLogger().level < logging.WARNING: # La vérification n'a lieu qu'en debug try: numpy.linalg.cholesky( __A ) - except: - raise ValueError("The %s a posteriori covariance matrix A is not symmetric positive-definite. Please check your a priori covariances and your observation operator."%(__selfA._name,)) + except Exception: + raise ValueError( + "The %s a posteriori covariance matrix A"%(__selfA._name,)+\ + " is not symmetric positive-definite. 
Please check your a"+\ + " priori covariances and your observation operator.") # return __A @@ -807,16 +832,20 @@ def QuantilesEstimations(selfA, A, Xa, HXa = None, Hm = None, HtM = None): return 0 # ============================================================================== -def ForceNumericBounds( __Bounds ): +def ForceNumericBounds( __Bounds, __infNumbers = True ): "Force les bornes à être des valeurs numériques, sauf si globalement None" # Conserve une valeur par défaut à None s'il n'y a pas de bornes if __Bounds is None: return None - # Converti toutes les bornes individuelles None à +/- l'infini + # Converti toutes les bornes individuelles None à +/- l'infini chiffré __Bounds = numpy.asarray( __Bounds, dtype=float ) if len(__Bounds.shape) != 2 or min(__Bounds.shape) <= 0 or __Bounds.shape[1] != 2: raise ValueError("Incorrectly shaped bounds data") - __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max - __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max + if __infNumbers: + __Bounds[numpy.isnan(__Bounds[:,0]),0] = -float('inf') + __Bounds[numpy.isnan(__Bounds[:,1]),1] = float('inf') + else: + __Bounds[numpy.isnan(__Bounds[:,0]),0] = -sys.float_info.max + __Bounds[numpy.isnan(__Bounds[:,1]),1] = sys.float_info.max return __Bounds # ============================================================================== @@ -829,7 +858,7 @@ def RecentredBounds( __Bounds, __Center, __Scale = None): return ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1)) else: # Recentre les valeurs numériques de bornes et change l'échelle par une matrice - return __Scale @ (ForceNumericBounds( __Bounds ) - numpy.ravel( __Center ).reshape((-1,1))) + return __Scale @ (ForceNumericBounds( __Bounds, False ) - numpy.ravel( __Center ).reshape((-1,1))) # ============================================================================== def ApplyBounds( __Vector, __Bounds, __newClip = True): @@ -859,8 +888,9 @@ def ApplyBounds( __Vector, __Bounds, __newClip = True): return __Vector # ============================================================================== -def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Betaf): +def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __SuppPars): "Recentre l'ensemble Xn autour de l'analyse 3DVAR" + __Betaf = __SuppPars["HybridCovarianceEquilibrium"] # Xf = EnsembleMean( __EnXf ) Pf = Covariance( asCovariance=EnsembleErrorCovariance(__EnXf) ) @@ -868,8 +898,8 @@ def Apply3DVarRecentringOnEnsemble(__EnXn, __EnXf, __Ynpu, __HO, __R, __B, __Bet # selfB = PartialAlgorithm("3DVAR") selfB._parameters["Minimizer"] = "LBFGSB" - selfB._parameters["MaximumNumberOfIterations"] = 15000 - selfB._parameters["CostDecrementTolerance"] = 1.e-7 + selfB._parameters["MaximumNumberOfIterations"] = __SuppPars["HybridMaximumNumberOfIterations"] + selfB._parameters["CostDecrementTolerance"] = __SuppPars["HybridCostDecrementTolerance"] selfB._parameters["ProjectedGradientTolerance"] = -1 selfB._parameters["GradientNormTolerance"] = 1.e-05 selfB._parameters["StoreInternalVariables"] = False @@ -984,6 +1014,7 @@ def multiXOsteps(selfA, Xb, Y, U, HO, EM, CM, R, B, Q, oneCycle, # return 0 + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/Persistence.py b/src/daComposant/daCore/Persistence.py index f155146..b6c1173 100644 --- a/src/daComposant/daCore/Persistence.py +++ 
b/src/daComposant/daCore/Persistence.py @@ -273,7 +273,7 @@ class Persistence(object): """ try: return [numpy.mean(item, dtype=mfp).astype('float') for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def stds(self, ddof=0): @@ -290,7 +290,7 @@ class Persistence(object): return [numpy.array(item).std(ddof=ddof, dtype=mfp).astype('float') for item in self.__values] else: return [numpy.array(item).std(dtype=mfp).astype('float') for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def sums(self): @@ -301,7 +301,7 @@ class Persistence(object): """ try: return [numpy.array(item).sum() for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def mins(self): @@ -312,7 +312,7 @@ class Persistence(object): """ try: return [numpy.array(item).min() for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def maxs(self): @@ -323,7 +323,7 @@ class Persistence(object): """ try: return [numpy.array(item).max() for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def norms(self, _ord=None): @@ -336,7 +336,7 @@ class Persistence(object): """ try: return [numpy.linalg.norm(item, _ord) for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def maes(self, _predictor=None): @@ -353,7 +353,7 @@ class Persistence(object): if _predictor is None: try: return [numpy.mean(numpy.abs(item)) for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") else: if len(_predictor) != len(self.__values): @@ -363,7 +363,7 @@ class Persistence(object): raise ValueError("Predictor size at step %i is incompatible with the values"%i) try: return [numpy.mean(numpy.abs(numpy.ravel(item) - numpy.ravel(_predictor[i]))) for i, item in enumerate(self.__values)] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def mses(self, _predictor=None): @@ -381,7 +381,7 @@ class Persistence(object): try: __n = self.shape()[0] return [(numpy.linalg.norm(item)**2 / __n) for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") else: if len(_predictor) != len(self.__values): @@ -392,7 +392,7 @@ class Persistence(object): try: __n = self.shape()[0] return [(numpy.linalg.norm(numpy.ravel(item) - numpy.ravel(_predictor[i]))**2 / __n) for i, item in enumerate(self.__values)] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") msds=mses # Mean-Square Deviation (MSD=MSE) @@ -412,7 +412,7 @@ class Persistence(object): try: __n = self.shape()[0] return [(numpy.linalg.norm(item) / math.sqrt(__n)) for item in self.__values] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") else: if len(_predictor) != len(self.__values): @@ -423,7 +423,7 @@ class Persistence(object): try: __n = self.shape()[0] return [(numpy.linalg.norm(numpy.ravel(item) - numpy.ravel(_predictor[i])) / math.sqrt(__n)) for i, item in enumerate(self.__values)] - except: + except Exception: raise TypeError("Base type is incompatible with numpy") rmsds = rmses # Root-Mean-Square Deviation (RMSD=RMSE) @@ -567,7 +567,7 @@ class Persistence(object): """ try: return numpy.mean(self.__values, axis=0, dtype=mfp).astype('float') - except: + except 
Exception: raise TypeError("Base type is incompatible with numpy") def std(self, ddof=0): @@ -584,7 +584,7 @@ class Persistence(object): return numpy.asarray(self.__values).std(ddof=ddof,axis=0).astype('float') else: return numpy.asarray(self.__values).std(axis=0).astype('float') - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def sum(self): @@ -595,7 +595,7 @@ class Persistence(object): """ try: return numpy.asarray(self.__values).sum(axis=0) - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def min(self): @@ -606,7 +606,7 @@ class Persistence(object): """ try: return numpy.asarray(self.__values).min(axis=0) - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def max(self): @@ -617,7 +617,7 @@ class Persistence(object): """ try: return numpy.asarray(self.__values).max(axis=0) - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def cumsum(self): @@ -628,7 +628,7 @@ class Persistence(object): """ try: return numpy.asarray(self.__values).cumsum(axis=0) - except: + except Exception: raise TypeError("Base type is incompatible with numpy") def plot(self, @@ -1017,6 +1017,7 @@ class CompositePersistence(object): # return filename + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daComposant/daCore/PlatformInfo.py b/src/daComposant/daCore/PlatformInfo.py index 013cc0c..1bb6937 100644 --- a/src/daComposant/daCore/PlatformInfo.py +++ b/src/daComposant/daCore/PlatformInfo.py @@ -99,10 +99,10 @@ class PlatformInfo(object): elif sys.platform.startswith('darwin'): if hasattr(platform, 'mac_ver'): __macosxv = { - '0': 'Cheetah', '1': 'Puma', '2': 'Jaguar', - '3': 'Panther', '4': 'Tiger', '5': 'Leopard', - '6': 'Snow Leopard', '7': 'Lion', '8': 'Mountain Lion', - '9': 'Mavericks', '10': 'Yosemite', '11': 'El Capitan', + '0' : 'Cheetah', '1' : 'Puma', '2' : 'Jaguar', + '3' : 'Panther', '4' : 'Tiger', '5' : 'Leopard', + '6' : 'Snow Leopard', '7' : 'Lion', '8' : 'Mountain Lion', + '9' : 'Mavericks', '10': 'Yosemite', '11': 'El Capitan', '12': 'Sierra', '13': 'High Sierra', '14': 'Mojave', '15': 'Catalina', '16': 'Big Sur', '17': 'Monterey', } @@ -185,6 +185,14 @@ class PlatformInfo(object): __version = "0.0.0" return __version + def getSdfVersion(self): + "Retourne la version de sdf disponible" + if has_sdf: + __version = sdf.__version__ + else: + __version = "0.0.0" + return __version + def getCurrentMemorySize(self): "Retourne la taille mémoire courante utilisée" return 1 @@ -211,6 +219,7 @@ class PlatformInfo(object): import daCore.version as dav return "%s %s (%s)"%(dav.name,dav.version,dav.date) + # ============================================================================== try: import scipy @@ -324,7 +333,7 @@ def strmatrix2liststr( __strvect ): __strvect = __strvect.replace(s,";") # "]" et ")" par ";" __strvect = re.sub(r';\s*;',r';',__strvect) __strvect = __strvect.rstrip(";") # Après ^ et avant v - __strmat = [l.split() for l in __strvect.split(";")] + __strmat = [__l.split() for __l in __strvect.split(";")] return __strmat def checkFileNameConformity( __filename, __warnInsteadOfPrint=True ): @@ -485,6 +494,7 @@ class SystemUsage(object): "Renvoie la mémoire totale maximale mesurée" return self._VmB('VmPeak:', unit) + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') 
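[Review note, not part of the patch] The scipy gate updated throughout the Atoms files above, `if "0.19" <= scipy.version.version <= "1.4.1":`, compares version strings lexicographically: a later release such as "1.10.0" sorts below "1.4.1" and would silently re-select the bundled lbfgsbhlt optimizer. A minimal sketch of a numeric gate follows; the helper name `version_tuple` is illustrative only, and `packaging.version.parse` would be the more robust choice where available.

    import scipy.version

    def version_tuple(v):
        "Turn '1.4.1' into (1, 4, 1): the comparison becomes numeric, not lexical."
        return tuple(int(p) for p in v.split(".")[:3] if p.isdigit())

    # Numeric bounds avoid the '"1.10.0" < "1.4.1"' string-ordering trap
    if (0, 19) <= version_tuple(scipy.version.version) <= (1, 4, 1):
        import daAlgorithms.Atoms.lbfgsbhlt as optimiseur
    else:
        import scipy.optimize as optimiseur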
diff --git a/src/daComposant/daCore/Reporting.py b/src/daComposant/daCore/Reporting.py index 0ccdba5..4f526f7 100644 --- a/src/daComposant/daCore/Reporting.py +++ b/src/daComposant/daCore/Reporting.py @@ -129,11 +129,9 @@ class __ReportV__(object): return 0 # ============================================================================== -# Classes d'interface utilisateur : ReportStorage, ReportViewIn* +# Classes d'interface utilisateur : ReportViewIn*, ReportStorage # Tags de structure : (title, h1, h2, h3, p, uli, oli, , ) -ReportStorage = __ReportC__ - class ReportViewInHtml(__ReportV__): """ Report in HTML @@ -295,6 +293,10 @@ class ReportViewInPlainTxt(__ReportV__): pg += "\n" return pg + +# Interface utilisateur de stockage des informations +ReportStorage = __ReportC__ + # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n') diff --git a/src/daSalome/daYacsSchemaCreator/run.py b/src/daSalome/daYacsSchemaCreator/run.py index fb3b572..8517799 100644 --- a/src/daSalome/daYacsSchemaCreator/run.py +++ b/src/daSalome/daYacsSchemaCreator/run.py @@ -36,7 +36,8 @@ def create_schema(config_file, config_content, yacs_schema_filename): # Import config_file try: (fd, filename) = tempfile.mkstemp() - exec(compile(open(config_file).read(), filename, 'exec')) + with open(config_file, 'r') as fid: + exec(compile(fid.read(), filename, 'exec')) except Exception as e: if isinstance(e, SyntaxError): msg = "at %s: %s"%(e.offset, e.text) else: msg = "" -- 2.39.2
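[Review note, not part of the patch] With this change the E3DVAR recentring no longer receives the equilibrium factor alone: etkf, mlef and senkf now pass the whole `selfA._parameters` dictionary, and Apply3DVarRecentringOnEnsemble reads HybridCovarianceEquilibrium, HybridMaximumNumberOfIterations and HybridCostDecrementTolerance from it instead of hard-coding the inner 3DVAR settings. A hedged sketch of a user case driving the three new knobs through the ADAO builder follows; the variant name "E3DVAR" and all numeric values are illustrative assumptions, not prescriptions.

    from adao import adaoBuilder

    case = adaoBuilder.New()
    case.setAlgorithmParameters(
        Algorithm  = "EnsembleKalmanFilter",
        Parameters = {
            "Variant"                         : "E3DVAR",  # hybrid variant selector (assumed value)
            "NumberOfMembers"                 : 25,
            "HybridCovarianceEquilibrium"     : 0.5,    # static/ensemble covariance balance (default)
            "HybridMaximumNumberOfIterations" : 15000,  # cap for the inner 3DVAR minimization (default)
            "HybridCostDecrementTolerance"    : 1.e-7,  # inner 3DVAR stopping criterion (default)
        },
    )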
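[Review note, not part of the patch] On the new `__infNumbers` switch of ForceNumericBounds: it chooses between true infinities and the largest finite float when replacing undefined (None/NaN) bounds. RecentredBounds passes False whenever a scaling matrix is applied, presumably because a matrix product with +/-inf entries yields inf or NaN results. A standalone sketch of the behaviour, with the function name lower-cased for illustration:

    import sys
    import numpy

    def force_numeric_bounds(bounds, inf_numbers=True):
        "Replace None/NaN bounds by +/-inf, or by finite extremes on request."
        if bounds is None:
            return None
        bounds = numpy.asarray(bounds, dtype=float)  # None entries become NaN
        low  = -float('inf') if inf_numbers else -sys.float_info.max
        high =  float('inf') if inf_numbers else  sys.float_info.max
        bounds[numpy.isnan(bounds[:, 0]), 0] = low   # undefined lower bounds
        bounds[numpy.isnan(bounds[:, 1]), 1] = high  # undefined upper bounds
        return bounds

    # Finite form, safe to multiply by a scaling matrix afterwards
    print(force_numeric_bounds([[None, 1.], [0., None]], inf_numbers=False))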