# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2022 EDF R&D
+# Copyright (C) 2008-2023 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
import copy
import time
import numpy
+import warnings
from functools import partial
from daCore import Persistence, PlatformInfo, Interfaces
from daCore import Templates
"""
def __init__(self,
             toleranceInRedundancy = 1.e-18,
             lengthOfRedundancy = -1,
            ):
    """
    Initialize the operator-evaluation cache.

    Parameters
    ----------
    toleranceInRedundancy : float
        Tolerance used to decide that two evaluation points are identical,
        so that a previously cached result can be reused.
    lengthOfRedundancy : int
        Maximum number of cached evaluations kept. A negative value means
        the length is chosen automatically at first storage (presumably in
        storeValueInX, defined elsewhere in this file).
    """
    # Patch residue resolved here: the historical misspelling
    # "lenghtOfRedundancy" is replaced by "lengthOfRedundancy".
    self.__tolerBP   = float(toleranceInRedundancy)
    self.__lengthOR  = int(lengthOfRedundancy)
    self.__initlnOR  = self.__lengthOR  # saved so disable()/enable() can restore it
    self.__seenNames = []
    self.__enabled   = True
    self.clearCache()
__alc = False
__HxV = None
if self.__enabled:
- for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
+ for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
if not hasattr(xValue, 'size'):
pass
elif (str(oName) != self.__listOPCV[i][3]):
def storeValueInX(self, xValue, HxValue, oName="" ):
"Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
- if self.__lenghtOR < 0:
- self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
- self.__initlnOR = self.__lenghtOR
+ if self.__lengthOR < 0:
+ self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
+ self.__initlnOR = self.__lengthOR
self.__seenNames.append(str(oName))
if str(oName) not in self.__seenNames: # Etend la liste si nouveau
- self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
- self.__initlnOR += self.__lenghtOR
+ self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
+ self.__initlnOR += self.__lengthOR
self.__seenNames.append(str(oName))
- while len(self.__listOPCV) > self.__lenghtOR:
+ while len(self.__listOPCV) > self.__lengthOR:
self.__listOPCV.pop(0)
self.__listOPCV.append( (
copy.copy(numpy.ravel(xValue)), # 0 Previous point
def disable(self):
    "Disable the cache, remembering its current length so enable() can restore it"
    # Resolve patch residue: keep only the corrected "__lengthOR" spelling.
    self.__initlnOR = self.__lengthOR
    self.__lengthOR = 0
    self.__enabled  = False
def enable(self):
    "Re-enable the cache, restoring the length saved by disable()"
    # Resolve patch residue: keep only the corrected "__lengthOR" spelling.
    self.__lengthOR = self.__initlnOR
    self.__enabled  = True
# ==============================================================================
else:
_hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
if not hasattr(_hserie, "pop"):
- raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
+ raise TypeError(
+ "The user input multi-function doesn't seem to return a"+\
+ " result sequence, behaving like a mono-function. It has"+\
+ " to be checked."
+ )
for i in _hindex:
_xv = _xserie.pop(0)
_hv = _hserie.pop(0)
__Function = asThreeFunctions
__Function.update({"useApproximatedDerivatives":True})
else:
- raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
+ raise ValueError(
+ "The functions has to be given in a dictionnary which have either"+\
+ " 1 key (\"Direct\") or"+\
+ " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
if "Direct" not in asThreeFunctions:
__Function["Direct"] = asThreeFunctions["Tangent"]
__Function.update(__Parameters)
else:
__Function = None
#
- # if sys.version_info[0] < 3 and isinstance(__Function, dict):
- # for k in ("Direct", "Tangent", "Adjoint"):
- # if k in __Function and hasattr(__Function[k],"__class__"):
- # if type(__Function[k]) is type(self.__init__):
- # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
- #
if appliedInX is not None and isinstance(appliedInX, dict):
__appliedInX = appliedInX
elif appliedInX is not None:
if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
- if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
+ if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
from daCore import NumericObjects
reducingMemoryUse = __Function["withReducingMemoryUse"],
avoidingRedundancy = __Function["withAvoidingRedundancy"],
toleranceInRedundancy = __Function["withToleranceInRedundancy"],
- lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
+ lengthOfRedundancy = __Function["withLengthOfRedundancy"],
mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
mpWorkers = __Function["NumberOfProcesses"],
mfEnabled = __Function["withmfEnabled"],
)
- self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Direct"] = Operator(
+ name = self.__name,
+ fromMethod = FDA.DirectOperator,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs,
+ enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator(
+ name = self.__name+"Tangent",
+ fromMethod = FDA.TangentOperator,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs )
+ self.__FO["Adjoint"] = Operator(
+ name = self.__name+"Adjoint",
+ fromMethod = FDA.AdjointOperator,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs )
elif isinstance(__Function, dict) and \
("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
(__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
- self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Direct"] = Operator(
+ name = self.__name,
+ fromMethod = __Function["Direct"],
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs,
+ enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator(
+ name = self.__name+"Tangent",
+ fromMethod = __Function["Tangent"],
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs )
+ self.__FO["Adjoint"] = Operator(
+ name = self.__name+"Adjoint",
+ fromMethod = __Function["Adjoint"],
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ extraArguments = self.__extraArgs )
elif asMatrix is not None:
if isinstance(__Matrix, str):
__Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
__matrice = numpy.asarray( __Matrix, dtype=float )
- self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
+ self.__FO["Direct"] = Operator(
+ name = self.__name,
+ fromMatrix = __matrice,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF,
+ enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator(
+ name = self.__name+"Tangent",
+ fromMatrix = __matrice,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF )
+ self.__FO["Adjoint"] = Operator(
+ name = self.__name+"Adjoint",
+ fromMatrix = __matrice.T,
+ reducingMemoryUse = __reduceM,
+ avoidingRedundancy = __avoidRC,
+ inputAsMultiFunction = inputAsMF )
del __matrice
else:
- raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
+ raise ValueError(
+ "The %s object is improperly defined or undefined,"%self.__name+\
+ " it requires at minima either a matrix, a Direct operator for"+\
+ " approximate derivatives or a Tangent/Adjoint operators pair."+\
+ " Please check your operator input.")
#
if __appliedInX is not None:
self.__FO["AppliedInX"] = {}
- for key in list(__appliedInX.keys()):
- if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
- # Pour le cas où l'on a une vraie matrice
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
- elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
- # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
- else:
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
+ for key in __appliedInX:
+ if isinstance(__appliedInX[key], str):
+ __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
+ self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
else:
self.__FO["AppliedInX"] = None
- CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
- CurrentOptimum : état optimal courant lors d'itérations
- CurrentState : état courant lors d'itérations
- - CurrentStepNumber : numéro courant de pas de mesure dans les algorithmes temporels
+ - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
+ - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
+ - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
+ - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
+ - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
+ - ForecastState : état prédit courant lors d'itérations
- GradientOfCostFunctionJ : gradient de la fonction-coût globale
- GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
- GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
- MahalanobisConsistency : indicateur de consistance des covariances
- OMA : Observation moins Analyse : Y - Xa
- OMB : Observation moins Background : Y - Xb
- - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
- - ForecastState : état prédit courant lors d'itérations
- Residu : dans le cas des algorithmes de vérification
- SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
- SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
+ self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
#
self.StoredVariables = {}
self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
+ self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
+ self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
+ self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
+ self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
+ self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
+ self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
self._getTimeState(reset=True)
#
- # Mise a jour des paramètres internes avec le contenu de Parameters, en
+ # Mise à jour des paramètres internes avec le contenu de Parameters, en
# reprenant les valeurs par défauts pour toutes celles non définies
self.__setParameters(Parameters, reset=True)
for k, v in self.__variable_names_not_public.items():
if k not in self._parameters: self.__setParameters( {k:v} )
- #
+
# Corrections et compléments des vecteurs
def __test_vvalue(argument, variable, argname, symbol=None):
if symbol is None: symbol = variable
elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
else:
- logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
+ logging.debug(
+ "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
+ self._name,argname,symbol,numpy.array(argument).size))
return 0
__test_vvalue( Xb, "Xb", "Background or initial state" )
__test_vvalue( Y, "Y", "Observation" )
__test_vvalue( U, "U", "Control" )
- #
+
# Corrections et compléments des covariances
def __test_cvalue(argument, variable, argname, symbol=None):
if symbol is None: symbol = variable
__test_cvalue( B, "B", "Background" )
__test_cvalue( R, "R", "Observation" )
__test_cvalue( Q, "Q", "Evolution" )
- #
+
# Corrections et compléments des opérateurs
def __test_ovalue(argument, variable, argname, symbol=None):
if symbol is None: symbol = variable
logging.debug("%s Bounds taken into account"%(self._name,))
else:
self._parameters["Bounds"] = None
- if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
+ if ("StateBoundsForQuantiles" in self._parameters) \
+ and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
+ and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
# Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
#
_C = numpy.dot(_EI, numpy.dot(_A, _EI))
self.StoredVariables["APosterioriCorrelations"].store( _C )
if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
- logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
- logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
+ logging.debug(
+ "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
+ self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
+ logging.debug(
+ "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
+ self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
logging.debug("%s Terminé", self._name)
else:
try:
msg = "'%s'"%k
- except:
+ except Exception:
raise TypeError("pop expected at least 1 arguments, got 0")
"If key is not found, d is returned if given, otherwise KeyError is raised"
try:
return d
- except:
+ except Exception:
raise KeyError(msg)
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
"""
- Doit implémenter l'opération élémentaire de calcul d'assimilation sous
- sa forme mathématique la plus naturelle possible.
+ Doit implémenter l'opération élémentaire de calcul algorithmique.
"""
- raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
-
- def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
+ raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
+
+ def defineRequiredParameter(self,
+ name = None,
+ default = None,
+ typecast = None,
+ message = None,
+ minval = None,
+ maxval = None,
+ listval = None,
+ listadv = None,
+ oldname = None,
+ ):
"""
Permet de définir dans l'algorithme des paramètres requis et leurs
caractéristiques par défaut.
"listval" : listval,
"listadv" : listadv,
"message" : message,
+ "oldname" : oldname,
}
self.__canonical_parameter_name[name.lower()] = name
+ if oldname is not None:
+ self.__canonical_parameter_name[oldname.lower()] = name # Conversion
+ self.__replace_by_the_new_name[oldname.lower()] = name
logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
else:
try:
__val = typecast( value )
- except:
+ except Exception:
raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
#
if minval is not None and (numpy.array(__val, float) < minval).any():
__inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
#~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
__canonic_fromDico_keys = __inverse_fromDico_keys.keys()
+ #
+ for k in __inverse_fromDico_keys.values():
+ if k.lower() in self.__replace_by_the_new_name:
+ __newk = self.__replace_by_the_new_name[k.lower()]
+ __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
+ __msg += " Please update your code."
+ warnings.warn(__msg, FutureWarning, stacklevel=50)
+ #
for k in self.__required_parameters.keys():
if k in __canonic_fromDico_keys:
self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
asDict = None,
asScript = None,
):
- "Mise a jour des parametres"
+ "Mise à jour des paramètres"
if asDict is None and asScript is not None:
__Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
else:
try:
catalogAd = r.loadCatalog("proc", __file)
r.addCatalog(catalogAd)
- except:
+ except Exception:
pass
try:
if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
module_path = os.path.abspath(os.path.join(directory, daDirectory))
if module_path is None:
- raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
+ raise ImportError(
+ "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
#
# Importe le fichier complet comme un module
# ------------------------------------------
self.__algorithmName = str(choice)
sys.path = sys_path_tmp ; del sys_path_tmp
except ImportError as e:
- raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
+ raise ImportError(
+ "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
#
# Instancie un objet du type élémentaire du fichier
# -------------------------------------------------
raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
#
if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
- raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
+ raise ValueError(
+ "Shape characteristic of observation operator (H)"+\
+ " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
- raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
+ raise ValueError(
+ "Shape characteristic of observation operator (H)"+\
+ " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
- raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
+ raise ValueError(
+ "Shape characteristic of observation operator (H)"+\
+ " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
- raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
+ raise ValueError(
+ "Shape characteristic of observation operator (H)"+\
+ " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
#
if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
if self.__algorithmName in ["EnsembleBlue",]:
self.__Xb.store( numpy.asarray(member, dtype=float) )
__Xb_shape = min(__B_shape)
else:
- raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
+ raise ValueError(
+ "Shape characteristic of a priori errors covariance matrix (B)"+\
+ " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
#
if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
- raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
+ raise ValueError(
+ "Shape characteristic of observation errors covariance matrix (R)"+\
+ " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
#
if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
- raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
+ raise ValueError(
+ "Shape characteristic of evolution model (EM)"+\
+ " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
#
if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
- raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
+ raise ValueError(
+ "Shape characteristic of control model (CM)"+\
+ " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
#
if ("Bounds" in self.__P) \
and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
# ==============================================================================
class ExternalParameters(object):
"""
- Classe générale d'interface de type texte de script utilisateur
+ Classe générale d'interface pour le stockage des paramètres externes
"""
def __init__(self,
name = "GenericExternalParameters",
asDict = None,
asScript = None,
):
- "Mise a jour des parametres"
+ "Mise à jour des paramètres"
if asDict is None and asScript is not None:
__Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
else:
self.shape = (self.shape[0],1)
self.size = self.shape[0] * self.shape[1]
else:
- raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
+ raise ValueError(
+ "The %s object is improperly defined or undefined,"%self.__name+\
+ " it requires at minima either a vector, a list/tuple of"+\
+ " vectors or a persistent object. Please check your vector input.")
#
if scheduledBy is not None:
self.__T = scheduledBy
__Scalar = PlatformInfo.strvect2liststr( __Scalar )
if len(__Scalar) > 0: __Scalar = __Scalar[0]
if numpy.array(__Scalar).size != 1:
- raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
+ raise ValueError(
+ " The diagonal multiplier given to define a sparse matrix is"+\
+ " not a unique scalar value.\n Its actual measured size is"+\
+ " %i. Please check your scalar input."%numpy.array(__Scalar).size)
self.__is_scalar = True
self.__C = numpy.abs( float(__Scalar) )
self.shape = (0,0)
self.size = 0
else:
pass
- # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
#
self.__validate()
if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
numpy.linalg.cholesky( self.__C )
- except:
+ except Exception:
raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
self.__C.cholesky()
- except:
+ except Exception:
raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
def isscalar(self):
elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
return self.__C * numpy.asmatrix(other)
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
if numpy.ravel(other).size == self.shape[1]: # Vecteur
return numpy.asmatrix(self.__C * numpy.ravel(other)).T
elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return self.__C * other
elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
elif self.isobject():
return self.__C.__mul__(other)
else:
- raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
+ raise NotImplementedError(
+ "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmatmul__(self, other):
"x.__rmul__(y) <==> y@x"
elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
return numpy.asmatrix(other) * self.__C
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
elif self.isvector() and isinstance(other,numpy.matrix):
if numpy.ravel(other).size == self.shape[0]: # Vecteur
return numpy.asmatrix(numpy.ravel(other) * self.__C)
elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
return numpy.asmatrix(numpy.array(other) * self.__C)
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return other * self.__C
elif self.isobject():
return self.__C.__rmatmul__(other)
else:
- raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+ raise NotImplementedError(
+ "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
def __rmul__(self, other):
"x.__rmul__(y) <==> y*x"
elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
return numpy.asmatrix(other) * self.__C
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
elif self.isvector() and isinstance(other,numpy.matrix):
if numpy.ravel(other).size == self.shape[0]: # Vecteur
return numpy.asmatrix(numpy.ravel(other) * self.__C)
elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
return numpy.asmatrix(numpy.array(other) * self.__C)
else:
- raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+ raise ValueError(
+ "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return other * self.__C
elif self.isscalar() and isinstance(other,float):
elif self.isobject():
return self.__C.__rmul__(other)
else:
- raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
+ raise NotImplementedError(
+ "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
def __len__(self):
"x.__len__() <==> len(x)"