# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2021 EDF R&D
+# Copyright (C) 2008-2022 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
import copy
import time
import numpy
+import warnings
+import multiprocessing
from functools import partial
from daCore import Persistence, PlatformInfo, Interfaces
from daCore import Templates
Arguments :
- name : nom d'opérateur
- fromMethod : argument de type fonction Python
- - fromMatrix : argument adapté au constructeur numpy.matrix
+ - fromMatrix : argument adapté au constructeur numpy.array/matrix
- avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
- reducingMemoryUse : booléen forçant (ou pas) des calculs moins
gourmands en mémoire
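# Usage sketch for the arguments listed above — names and values are purely
# illustrative, the remaining keyword arguments are assumed to keep their
# defaults, and fromMethod is assumed to take a single state vector:
import numpy
H1 = Operator( name = "H", fromMatrix = numpy.array([[1., 0.], [0., 2.]]) )   # linear operator from an array
H2 = Operator( name = "H", fromMatrix = "1 0 ; 0 2" )                         # same matrix given as text, parsed by PlatformInfo.strmatrix2liststr
H3 = Operator( name = "H", fromMethod = lambda x: numpy.ravel(x)**2 )         # non-linear operator from a Python function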
self.__Type = "Method"
elif fromMatrix is not None:
self.__Method = None
- self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
+ if isinstance(fromMatrix, str):
+ fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
+ self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
self.__Type = "Matrix"
else:
self.__Method = None
assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
_HxValue = []
for i in range(len(_HValue)):
- _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
+ _HxValue.append( _HValue[i] )
if self.__avoidRC:
Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
else:
else:
if self.__Matrix is not None:
self.__addOneMatrixCall()
- _xv = numpy.ravel(xv).reshape((-1,1))
- _hv = self.__Matrix * _xv
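+ # Linear case: apply the stored matrix to the raveled state with the ndarray "@" product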
+ _hv = self.__Matrix @ numpy.ravel(xv)
else:
self.__addOneMethodCall()
_xserie.append( xv )
_HxValue = []
for paire in _xuValue:
_xValue, _uValue = paire
- _xValue = numpy.matrix(numpy.ravel(_xValue)).T
self.__addOneMatrixCall()
- _HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
_xuArgs = []
for paire in _xuValue:
_HxValue = []
for paire in _nxValue:
_xNominal, _xValue = paire
- _xValue = numpy.matrix(numpy.ravel(_xValue)).T
self.__addOneMatrixCall()
- _HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
self.__addOneMethodCall( len(_nxValue) )
if self.__extraArgs is None:
if argsAsSerie:
self.__addOneMethodCall( len(ValueForMethodForm) )
for _vfmf in ValueForMethodForm:
- mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
+ mValue.append( self.__Method(((_vfmf, None),)) )
else:
self.__addOneMethodCall()
mValue = self.__Method(((ValueForMethodForm, None),))
self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
elif asMatrix is not None:
- __matrice = numpy.matrix( __Matrix, numpy.float )
+ if isinstance(__Matrix, str):
+ __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
+ __matrice = numpy.asarray( __Matrix, dtype=float )
self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
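# For a linear operator given by a matrix, the tangent is the matrix itself
# and the adjoint is its transpose (hence __matrice.T above); a quick plain
# numpy check of the defining identity <H x, y> == <x, H^T y>:
import numpy
H = numpy.array([[1., 2.], [0., 3.], [4., 0.]])
x = numpy.array([1., -1.])
y = numpy.array([0.5, 2., -1.])
assert numpy.isclose( (H @ x) @ y, x @ (H.T @ y) )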
#
if __appliedInX is not None:
self.__FO["AppliedInX"] = {}
- for key in list(__appliedInX.keys()):
- if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
- # Pour le cas où l'on a une vraie matrice
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
- elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
- # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
- else:
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
+ for key in __appliedInX:
+ if isinstance(__appliedInX[key], str):
+ __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
+ self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
else:
self.__FO["AppliedInX"] = None
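# The PlatformInfo string helpers used above are not shown in this excerpt; a
# rough functional equivalent, deduced from the separator handling they
# replace further below, could look like this (an assumption, not the actual
# implementation):
def _strvect2liststr(text):
    "Split a vector given as text, ',' and ';' acting as plain separators"
    return text.replace(";", " ").replace(",", " ").split()

def _strmatrix2liststr(text):
    "Split a matrix given as text, ';' separating the rows"
    return [row.replace(",", " ").split() for row in text.split(";")]

# e.g. _strmatrix2liststr("1 0 ; 0 2") gives [['1', '0'], ['0', '2']], which
# numpy.asarray(..., dtype=float) then turns into the expected 2x2 array.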
- CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
- CurrentOptimum : état optimal courant lors d'itérations
- CurrentState : état courant lors d'itérations
+ - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
- GradientOfCostFunctionJ : gradient de la fonction-coût globale
- GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
- GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
+ self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
#
self.StoredVariables = {}
self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
+ self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
# Verbosité et logging
if logging.getLogger().level < logging.WARNING:
self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
- if PlatformInfo.has_scipy:
- import scipy.optimize
- self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
- else:
- self._parameters["optmessages"] = 15
+ self._parameters["optmessages"] = 15
else:
self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
- if PlatformInfo.has_scipy:
- import scipy.optimize
- self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
- else:
- self._parameters["optmessages"] = 15
+ self._parameters["optmessages"] = 0
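+ # 15 and 0 correspond respectively to scipy.optimize.tnc MSG_ALL and MSG_NONE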
#
return 0
"""
raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
- def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
+ def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None, oldname = None):
"""
Permet de définir dans l'algorithme des paramètres requis et leurs
caractéristiques par défaut.
"listval" : listval,
"listadv" : listadv,
"message" : message,
+ "oldname" : oldname,
}
self.__canonical_parameter_name[name.lower()] = name
+ if oldname is not None:
+ self.__canonical_parameter_name[oldname.lower()] = name # Conversion
+ self.__replace_by_the_new_name[oldname.lower()] = name
logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
__inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
#~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
__canonic_fromDico_keys = __inverse_fromDico_keys.keys()
+ #
+ for k in __inverse_fromDico_keys.values():
+ if k.lower() in self.__replace_by_the_new_name:
+ __newk = self.__replace_by_the_new_name[k.lower()]
+ __msg = "The parameter '%s' used in the '%s' algorithm case is deprecated and has to be replaced by '%s'. Please update your code."%(k,self._name,__newk)
+ warnings.warn(__msg, FutureWarning, stacklevel=50)
+ #
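# Sketch of the renaming mechanism above, with purely hypothetical parameter
# names: a key declared through "oldname" is still accepted, mapped to the
# new canonical name, and a FutureWarning asks the user to update the case.
import warnings
_replace_by_the_new_name = {"maximumnumberofsteps": "MaximumNumberOfIterations"}   # hypothetical pair
_user_parameters = {"MaximumNumberOfSteps": 50}
_resolved = {}
for _key, _value in _user_parameters.items():
    if _key.lower() in _replace_by_the_new_name:
        _newk = _replace_by_the_new_name[_key.lower()]
        warnings.warn("the parameter '%s' is deprecated, use '%s' instead"%(_key, _newk), FutureWarning)
        _key = _newk
    _resolved[_key] = _value
# _resolved == {"MaximumNumberOfIterations": 50}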
for k in self.__required_parameters.keys():
if k in __canonic_fromDico_keys:
self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
else:
return __SC
+# ==============================================================================
+class PartialAlgorithm(object):
+ """
+ Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
+ action avancée comme la vérification. Pour les méthodes reprises ici,
+ le fonctionnement est identique à celui de la classe "Algorithm".
+ """
+ def __init__(self, name):
+ self._name = str( name )
+ self._parameters = {"StoreSupplementaryCalculations":[]}
+ #
+ self.StoredVariables = {}
+ self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
+ self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
+ self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
+ self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
+ self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
+ self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
+ #
+ self.__canonical_stored_name = {}
+ for k in self.StoredVariables:
+ self.__canonical_stored_name[k.lower()] = k
+
+ def _toStore(self, key):
+ "True if in StoreSupplementaryCalculations, else False"
+ return key in self._parameters["StoreSupplementaryCalculations"]
+
+ def get(self, key=None):
+ """
+ Renvoie l'une des variables stockées identifiée par la clé, ou le
+ dictionnaire de l'ensemble des variables disponibles en l'absence de
+ clé. Ce sont directement les variables sous forme objet qui sont
+ renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
+ des classes de persistance.
+ """
+ if key is not None:
+ return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
+ else:
+ return self.StoredVariables
+
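# Minimal usage sketch of PartialAlgorithm: it only mimics the storage side
# of Algorithm, so values go in through the Persistence objects' store()
# method and come back, as objects, through get() with case-insensitive keys
# (illustrative values, no algorithmic check is performed):
pa = PartialAlgorithm("Sketch")
pa.StoredVariables["Analysis"].store( [1., 2., 3.] )
analysis  = pa.get("analysis")          # the Persistence object itself
allstored = pa.get()                    # no key: the whole StoredVariables dictionary
wanted    = pa._toStore("CurrentState") # False: nothing requested in StoreSupplementaryCalculations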
# ==============================================================================
class AlgorithmAndParameters(object):
"""
if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
if self.__algorithmName in ["EnsembleBlue",]:
asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
- self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
+ self.__Xb = Persistence.OneVector("Background")
for member in asPersistentVector:
- self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
+ self.__Xb.store( numpy.asarray(member, dtype=float) )
__Xb_shape = min(__B_shape)
else:
raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
#
if __Vector is not None:
self.__is_vector = True
- self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
+ if isinstance(__Vector, str):
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
+ self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
self.shape = self.__V.shape
self.size = self.__V.size
elif __Series is not None:
self.__is_series = True
if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
- self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
- if isinstance(__Series, str): __Series = eval(__Series)
+ self.__V = Persistence.OneVector(self.__name)
+ if isinstance(__Series, str):
+ __Series = PlatformInfo.strmatrix2liststr(__Series)
for member in __Series:
- self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
+ if isinstance(member, str):
+ member = PlatformInfo.strvect2liststr( member )
+ self.__V.store(numpy.asarray( member, dtype=float ))
else:
self.__V = __Series
if isinstance(self.__V.shape, (tuple, list)):
#
if __Scalar is not None:
if isinstance(__Scalar, str):
- __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
+ __Scalar = PlatformInfo.strvect2liststr( __Scalar )
if len(__Scalar) > 0: __Scalar = __Scalar[0]
if numpy.array(__Scalar).size != 1:
raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
self.size = 0
elif __Vector is not None:
if isinstance(__Vector, str):
- __Vector = __Vector.replace(";"," ").replace(","," ").split()
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
self.__is_vector = True
- self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
+ self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
self.shape = (self.__C.size,self.__C.size)
self.size = self.__C.size**2
elif __Matrix is not None:
raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
- L = numpy.linalg.cholesky( self.__C )
+ numpy.linalg.cholesky( self.__C )
except:
raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
- L = self.__C.cholesky()
+ self.__C.cholesky()
except:
raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
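# The Cholesky factorisation only exists for symmetric positive-definite
# matrices, which is why it is used above as a pure validity check, the
# factor itself being discarded; e.g. with plain numpy:
import numpy
numpy.linalg.cholesky(numpy.array([[2., 1.], [1., 2.]]))      # SPD: succeeds
# numpy.linalg.cholesky(numpy.array([[1., 2.], [2., 1.]]))    # not SPD: raises LinAlgError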
def asfullmatrix(self, msize=None):
"Matrice pleine"
if self.ismatrix():
- return numpy.asarray(self.__C)
+ return numpy.asarray(self.__C, dtype=float)
elif self.isvector():
- return numpy.asarray( numpy.diag(self.__C), float )
+ return numpy.asarray( numpy.diag(self.__C), dtype=float )
elif self.isscalar():
if msize is None:
raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
else:
- return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
+ return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
return self.__C.asfullmatrix()
else:
return self.__C + numpy.asmatrix(other)
elif self.isvector() or self.isscalar():
_A = numpy.asarray(other)
- _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
+ if len(_A.shape) == 1:
+ _A.reshape((-1,1))[::2] += self.__C
+ else:
+ _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
return numpy.asmatrix(_A)
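# Plain numpy sketch of the strided-diagonal update used above for a 2-D
# operand when the covariance is given as a vector or a scalar: on an (n,n)
# array flattened to 1-D, the view [::n+1] walks exactly the diagonal, so the
# variances are added in place without building the full covariance matrix.
import numpy
variances = numpy.array([1., 2., 3.])            # covariance given by its diagonal
A = numpy.zeros((3, 3))
A.reshape(A.size)[::A.shape[1] + 1] += variances
assert numpy.allclose(A, numpy.diag(variances))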
def __radd__(self, other):
raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return other * self.__C
+ elif self.isscalar() and isinstance(other,float):
+ return other * self.__C
elif self.isobject():
return self.__C.__rmul__(other)
else:
if __mpEnabled:
_jobs = __xserie
# logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
- import multiprocessing
with multiprocessing.Pool(__mpWorkers) as pool:
__multiHX = pool.map( _sFunction, _jobs )
pool.close()
# logging.debug("MULTF Internal multifonction calculations end")
return __multiHX
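# Minimal sketch of the pool pattern above, with an illustrative stand-in for
# the single-evaluation function (the real one comes from the user operator):
# the serie of states is mapped over the worker pool and the results come
# back in the same order.
import multiprocessing
import numpy

def _one_evaluation(x):
    "Stand-in for one model evaluation"
    return numpy.sum(numpy.asarray(x)**2)

if __name__ == "__main__":
    _jobs = [numpy.array([1., 2.]), numpy.array([3., 4.])]
    with multiprocessing.Pool(2) as pool:
        _results = pool.map(_one_evaluation, _jobs)
    # _results == [5.0, 25.0]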
-# ==============================================================================
-def CostFunction3D(_x,
- _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
- _HmX = None, # Simulation déjà faite de Hm(x)
- _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
- _BI = None,
- _RI = None,
- _Xb = None,
- _Y = None,
- _SIV = False, # A résorber pour la 8.0
- _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
- _nPS = 0, # nbPreviousSteps
- _QM = "DA", # QualityMeasure
- _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
- _fRt = False, # Restitue ou pas la sortie étendue
- _sSc = True, # Stocke ou pas les SSC
- ):
- """
- Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
- et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
- DFO, QuantileRegression
- """
- if not _sSc:
- _SIV = False
- _SSC = {}
- else:
- for k in ["CostFunctionJ",
- "CostFunctionJb",
- "CostFunctionJo",
- "CurrentOptimum",
- "CurrentState",
- "IndexOfOptimum",
- "SimulatedObservationAtCurrentOptimum",
- "SimulatedObservationAtCurrentState",
- ]:
- if k not in _SSV:
- _SSV[k] = []
- if hasattr(_SSV[k],"store"):
- _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
- #
- _X = numpy.asmatrix(numpy.ravel( _x )).T
- if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
- _SSV["CurrentState"].append( _X )
- #
- if _HmX is not None:
- _HX = _HmX
- else:
- if _Hm is None:
- raise ValueError("COSTFUNCTION3D Operator has to be defined.")
- if _arg is None:
- _HX = _Hm( _X )
- else:
- _HX = _Hm( _X, *_arg )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- #
- if "SimulatedObservationAtCurrentState" in _SSC or \
- "SimulatedObservationAtCurrentOptimum" in _SSC:
- _SSV["SimulatedObservationAtCurrentState"].append( _HX )
- #
- if numpy.any(numpy.isnan(_HX)):
- Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
- else:
- _Y = numpy.asmatrix(numpy.ravel( _Y )).T
- if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
- if _BI is None or _RI is None:
- raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
- _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
- Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
- Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
- elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
- if _RI is None:
- raise ValueError("Observation error covariance matrix has to be properly defined!")
- Jb = 0.
- Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
- elif _QM in ["LeastSquares", "LS", "L2"]:
- Jb = 0.
- Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
- elif _QM in ["AbsoluteValue", "L1"]:
- Jb = 0.
- Jo = numpy.sum( numpy.abs(_Y - _HX) )
- elif _QM in ["MaximumError", "ME"]:
- Jb = 0.
- Jo = numpy.max( numpy.abs(_Y - _HX) )
- elif _QM in ["QR", "Null"]:
- Jb = 0.
- Jo = 0.
- else:
- raise ValueError("Unknown asked quality measure!")
- #
- J = float( Jb ) + float( Jo )
- #
- if _sSc:
- _SSV["CostFunctionJb"].append( Jb )
- _SSV["CostFunctionJo"].append( Jo )
- _SSV["CostFunctionJ" ].append( J )
- #
- if "IndexOfOptimum" in _SSC or \
- "CurrentOptimum" in _SSC or \
- "SimulatedObservationAtCurrentOptimum" in _SSC:
- IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
- if "IndexOfOptimum" in _SSC:
- _SSV["IndexOfOptimum"].append( IndexMin )
- if "CurrentOptimum" in _SSC:
- _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
- if "SimulatedObservationAtCurrentOptimum" in _SSC:
- _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
- #
- if _fRt:
- return _SSV
- else:
- if _QM in ["QR"]: # Pour le QuantileRegression
- return _HX
- else:
- return J
-
# ==============================================================================
if __name__ == "__main__":
print('\n AUTODIAGNOSTIC\n')