# -*- coding: utf-8 -*-
#
-# Copyright (C) 2008-2020 EDF R&D
+# Copyright (C) 2008-2022 EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
import sys
import logging
import copy
+import time
import numpy
+import warnings
from functools import partial
from daCore import Persistence, PlatformInfo, Interfaces
from daCore import Templates
def clearCache(self):
"Vide le cache"
- self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
+ self.__listOPCV = []
self.__seenNames = []
- # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
- def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
+ def wasCalculatedIn(self, xValue, oName="" ):
"Vérifie l'existence d'un calcul correspondant à la valeur"
__alc = False
__HxV = None
if self.__enabled:
for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
- if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
- # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
+ if not hasattr(xValue, 'size'):
pass
- elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
+ elif (str(oName) != self.__listOPCV[i][3]):
+ pass
+ elif (xValue.size != self.__listOPCV[i][0].size):
+ pass
+ elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
+ pass
+ elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
__alc = True
__HxV = self.__listOPCV[i][1]
- # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
break
return __alc, __HxV
def storeValueInX(self, xValue, HxValue, oName="" ):
"Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
if self.__lenghtOR < 0:
- self.__lenghtOR = 2 * xValue.size + 2
+ self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
self.__initlnOR = self.__lenghtOR
self.__seenNames.append(str(oName))
if str(oName) not in self.__seenNames: # Etend la liste si nouveau
- self.__lenghtOR += 2 * xValue.size + 2
+ self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
self.__initlnOR += self.__lenghtOR
self.__seenNames.append(str(oName))
while len(self.__listOPCV) > self.__lenghtOR:
- # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
self.__listOPCV.pop(0)
self.__listOPCV.append( (
- copy.copy(numpy.ravel(xValue)),
- copy.copy(HxValue),
- numpy.linalg.norm(xValue),
- str(oName),
+ copy.copy(numpy.ravel(xValue)), # 0 Previous point
+ copy.copy(HxValue), # 1 Previous value
+ numpy.linalg.norm(xValue), # 2 Norm
+ str(oName), # 3 Operator name
) )
def disable(self):
fromMethod = None,
fromMatrix = None,
avoidingRedundancy = True,
+ reducingMemoryUse = False,
inputAsMultiFunction = False,
enableMultiProcess = False,
extraArguments = None,
Arguments :
- name : nom d'opérateur
- fromMethod : argument de type fonction Python
- - fromMatrix : argument adapté au constructeur numpy.matrix
+ - fromMatrix : argument adapté au constructeur numpy.array/matrix
- avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
+ - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
+ gourmands en mémoire
- inputAsMultiFunction : booléen indiquant une fonction explicitement
définie (ou pas) en multi-fonction
- extraArguments : arguments supplémentaires passés à la fonction de
"""
self.__name = str(name)
self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
- self.__AvoidRC = bool( avoidingRedundancy )
+ self.__reduceM = bool( reducingMemoryUse )
+ self.__avoidRC = bool( avoidingRedundancy )
self.__inputAsMF = bool( inputAsMultiFunction )
self.__mpEnabled = bool( enableMultiProcess )
self.__extraArgs = extraArguments
self.__Type = "Method"
elif fromMatrix is not None:
self.__Method = None
- self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
+ if isinstance(fromMatrix, str):
+ fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
+ self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
self.__Type = "Matrix"
else:
self.__Method = None
def enableAvoidingRedundancy(self):
"Active le cache"
- if self.__AvoidRC:
+ if self.__avoidRC:
Operator.CM.enable()
else:
Operator.CM.disable()
"Renvoie le type"
return self.__Type
- def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
+ def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
"""
Permet de restituer le résultat de l'application de l'opérateur à une
série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
#
if _HValue is not None:
assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
- HxValue = []
+ _HxValue = []
for i in range(len(_HValue)):
- HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
- if self.__AvoidRC:
- Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
+ _HxValue.append( _HValue[i] )
+ if self.__avoidRC:
+ Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
else:
- HxValue = []
+ _HxValue = []
_xserie = []
_hindex = []
for i, xv in enumerate(_xValue):
- if self.__AvoidRC:
+ if self.__avoidRC:
__alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
else:
__alreadyCalculated = False
else:
if self.__Matrix is not None:
self.__addOneMatrixCall()
- _hv = self.__Matrix * xv
+ _hv = self.__Matrix @ numpy.ravel(xv)
else:
self.__addOneMethodCall()
_xserie.append( xv )
_hindex.append( i )
_hv = None
- HxValue.append( _hv )
+ _HxValue.append( _hv )
#
if len(_xserie)>0 and self.__Matrix is None:
if self.__extraArgs is None:
for i in _hindex:
_xv = _xserie.pop(0)
_hv = _hserie.pop(0)
- HxValue[i] = _hv
- if self.__AvoidRC:
+ _HxValue[i] = _hv
+ if self.__avoidRC:
Operator.CM.storeValueInX(_xv,_hv,self.__name)
#
- if argsAsSerie: return HxValue
- else: return HxValue[-1]
+ if returnSerieAsArrayMatrix:
+ _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
+ #
+ if argsAsSerie: return _HxValue
+ else: return _HxValue[-1]
- def appliedControledFormTo(self, paires, argsAsSerie = False):
+ def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
"""
Permet de restituer le résultat de l'application de l'opérateur à des
paires (xValue, uValue). Cette méthode se contente d'appliquer, son
PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
#
if self.__Matrix is not None:
- HxValue = []
+ _HxValue = []
for paire in _xuValue:
_xValue, _uValue = paire
self.__addOneMatrixCall()
- HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
- HxValue = []
+ _xuArgs = []
for paire in _xuValue:
- _xuValue = []
_xValue, _uValue = paire
if _uValue is not None:
- _xuValue.append( paire )
+ _xuArgs.append( paire )
else:
- _xuValue.append( _xValue )
- self.__addOneMethodCall( len(_xuValue) )
+ _xuArgs.append( _xValue )
+ self.__addOneMethodCall( len(_xuArgs) )
if self.__extraArgs is None:
- HxValue = self.__Method( _xuValue ) # Calcul MF
+ _HxValue = self.__Method( _xuArgs ) # Calcul MF
else:
- HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
+ _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
+ #
+ if returnSerieAsArrayMatrix:
+ _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
#
- if argsAsSerie: return HxValue
- else: return HxValue[-1]
+ if argsAsSerie: return _HxValue
+ else: return _HxValue[-1]
- def appliedInXTo(self, paires, argsAsSerie = False):
+ def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
"""
Permet de restituer le résultat de l'application de l'opérateur à une
série d'arguments xValue, sachant que l'opérateur est valable en
PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
#
if self.__Matrix is not None:
- HxValue = []
+ _HxValue = []
for paire in _nxValue:
_xNominal, _xValue = paire
self.__addOneMatrixCall()
- HxValue.append( self.__Matrix * _xValue )
+ _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
else:
self.__addOneMethodCall( len(_nxValue) )
if self.__extraArgs is None:
- HxValue = self.__Method( _nxValue ) # Calcul MF
+ _HxValue = self.__Method( _nxValue ) # Calcul MF
else:
- HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
+ _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
+ #
+ if returnSerieAsArrayMatrix:
+ _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
#
- if argsAsSerie: return HxValue
- else: return HxValue[-1]
+ if argsAsSerie: return _HxValue
+ else: return _HxValue[-1]
def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
"""
if argsAsSerie:
self.__addOneMethodCall( len(ValueForMethodForm) )
for _vfmf in ValueForMethodForm:
- mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
+ mValue.append( self.__Method(((_vfmf, None),)) )
else:
self.__addOneMethodCall()
mValue = self.__Method(((ValueForMethodForm, None),))
asDict = None, # Parameters
appliedInX = None,
extraArguments = None,
- avoidRC = True,
+ performancePrf = None,
inputAsMF = False,# Fonction(s) as Multi-Functions
scheduledBy = None,
toBeChecked = False,
__Parameters["EnableMultiProcessingInEvaluation"] = False
if "withIncrement" in __Parameters: # Temporaire
__Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
+ # Le défaut est équivalent à "ReducedOverallRequirements"
+ __reduceM, __avoidRC = True, True
+ if performancePrf is not None:
+ if performancePrf == "ReducedAmountOfCalculation":
+ __reduceM, __avoidRC = False, True
+ elif performancePrf == "ReducedMemoryFootprint":
+ __reduceM, __avoidRC = True, False
+ elif performancePrf == "NoSavings":
+ __reduceM, __avoidRC = False, False
#
if asScript is not None:
__Matrix, __Function = None, None
if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
if "withdX" not in __Function: __Function["withdX"] = None
- if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
+ if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
+ if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
centeredDF = __Function["CenteredFiniteDifference"],
increment = __Function["DifferentialIncrement"],
dX = __Function["withdX"],
+ extraArguments = self.__extraArgs,
+ reducingMemoryUse = __Function["withReducingMemoryUse"],
avoidingRedundancy = __Function["withAvoidingRedundancy"],
toleranceInRedundancy = __Function["withToleranceInRedundancy"],
lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
mpWorkers = __Function["NumberOfProcesses"],
mfEnabled = __Function["withmfEnabled"],
)
- self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
elif isinstance(__Function, dict) and \
("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
(__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
- self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
+ self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
elif asMatrix is not None:
- __matrice = numpy.matrix( __Matrix, numpy.float )
- self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
- self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
- self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
+ if isinstance(__Matrix, str):
+ __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
+ __matrice = numpy.asarray( __Matrix, dtype=float )
+ self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
+ self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
+ self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
del __matrice
else:
raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
#
if __appliedInX is not None:
self.__FO["AppliedInX"] = {}
- for key in list(__appliedInX.keys()):
- if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
- # Pour le cas où l'on a une vraie matrice
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
- elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
- # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
- else:
- self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
+ for key in __appliedInX:
+ if isinstance(__appliedInX[key], str):
+ __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
+ self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
else:
self.__FO["AppliedInX"] = None
- CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
- CostFunctionJo : partie observations de la fonction-coût : Jo
- CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
+ - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
- CurrentOptimum : état optimal courant lors d'itérations
- CurrentState : état courant lors d'itérations
+ - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
- GradientOfCostFunctionJ : gradient de la fonction-coût globale
- GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
- GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
- MahalanobisConsistency : indicateur de consistance des covariances
- OMA : Observation moins Analyse : Y - Xa
- OMB : Observation moins Background : Y - Xb
+ - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
- ForecastState : état prédit courant lors d'itérations
- Residu : dans le cas des algorithmes de vérification
+ - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
- SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
- SigmaObs2 : indicateur de correction optimale des erreurs d'observation
- SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
#
self._name = str( name )
self._parameters = {"StoreSupplementaryCalculations":[]}
+ self.__internal_state = {}
self.__required_parameters = {}
self.__required_inputs = {
"RequiredInputValues":{"mandatory":(), "optional":()},
self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
+ self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
#
self.StoredVariables = {}
self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
+ self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
+ self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
+ self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
+ self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
+ self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
self.__canonical_parameter_name["algorithm"] = "Algorithm"
self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
- def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
+ def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
"Pré-calcul"
logging.debug("%s Lancement", self._name)
logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
+ self._getTimeState(reset=True)
#
# Mise a jour des paramètres internes avec le contenu de Parameters, en
# reprenant les valeurs par défauts pour toutes celles non définies
for k, v in self.__variable_names_not_public.items():
if k not in self._parameters: self.__setParameters( {k:v} )
#
- # Corrections et compléments
- def __test_vvalue(argument, variable, argname):
+ # Corrections et compléments des vecteurs
+ def __test_vvalue(argument, variable, argname, symbol=None):
+ if symbol is None: symbol = variable
if argument is None:
if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
- raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
+ raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
- logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
+ logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
else:
- logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
+ logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
else:
- logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
+ if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
+ logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
+ elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
+ logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
+ else:
+ logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
return 0
__test_vvalue( Xb, "Xb", "Background or initial state" )
__test_vvalue( Y, "Y", "Observation" )
+ __test_vvalue( U, "U", "Control" )
#
- def __test_cvalue(argument, variable, argname):
+ # Corrections et compléments des covariances
+ def __test_cvalue(argument, variable, argname, symbol=None):
+ if symbol is None: symbol = variable
if argument is None:
if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
- raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
+ raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
- logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
+ logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
else:
- logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
+ logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
else:
- logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
+ if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
+ logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
+ elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
+ logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
+ else:
+ logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
return 0
- __test_cvalue( R, "R", "Observation" )
__test_cvalue( B, "B", "Background" )
+ __test_cvalue( R, "R", "Observation" )
__test_cvalue( Q, "Q", "Evolution" )
#
+ # Corrections et compléments des opérateurs
+ def __test_ovalue(argument, variable, argname, symbol=None):
+ if symbol is None: symbol = variable
+ if argument is None or (isinstance(argument,dict) and len(argument)==0):
+ if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
+ raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
+ elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
+ logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
+ else:
+ logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
+ else:
+ if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
+ logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
+ elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
+ logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
+ else:
+ logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
+ return 0
+ __test_ovalue( HO, "HO", "Observation", "H" )
+ __test_ovalue( EM, "EM", "Evolution", "M" )
+ __test_ovalue( CM, "CM", "Control Model", "C" )
+ #
+ # Corrections et compléments des bornes
if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
- logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
+ logging.debug("%s Bounds taken into account"%(self._name,))
else:
self._parameters["Bounds"] = None
+ if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
+ logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
+ # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
+ #
+ # Corrections et compléments de l'initialisation en X
+ if "InitializationPoint" in self._parameters:
+ if Xb is not None:
+ if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
+ if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
+ raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
+ %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
+ # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
+ else:
+ self._parameters["InitializationPoint"] = numpy.ravel(Xb)
+ else:
+ if self._parameters["InitializationPoint"] is None:
+ raise ValueError("Forced initial point can not be set without any given Background or required value")
+ #
+ # Correction pour pallier a un bug de TNC sur le retour du Minimum
+ if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
+ self.setParameterValue("StoreInternalVariables",True)
#
+ # Verbosité et logging
if logging.getLogger().level < logging.WARNING:
self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
- if PlatformInfo.has_scipy:
- import scipy.optimize
- self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
- else:
- self._parameters["optmessages"] = 15
+ self._parameters["optmessages"] = 15
else:
self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
- if PlatformInfo.has_scipy:
- import scipy.optimize
- self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
- else:
- self._parameters["optmessages"] = 15
+ self._parameters["optmessages"] = 0
#
return 0
logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
+ logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
logging.debug("%s Terminé", self._name)
return 0
"""
raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
- def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
+ def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None, oldname = None):
"""
Permet de définir dans l'algorithme des paramètres requis et leurs
caractéristiques par défaut.
"minval" : minval,
"maxval" : maxval,
"listval" : listval,
+ "listadv" : listadv,
"message" : message,
+ "oldname" : oldname,
}
self.__canonical_parameter_name[name.lower()] = name
+ if oldname is not None:
+ self.__canonical_parameter_name[oldname.lower()] = name # Conversion
+ self.__replace_by_the_new_name[oldname.lower()] = name
logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
minval = self.__required_parameters[__k]["minval"]
maxval = self.__required_parameters[__k]["maxval"]
listval = self.__required_parameters[__k]["listval"]
+ listadv = self.__required_parameters[__k]["listadv"]
#
if value is None and default is None:
__val = None
raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
if maxval is not None and (numpy.array(__val, float) > maxval).any():
raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
- if listval is not None:
+ if listval is not None or listadv is not None:
if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
for v in __val:
- if v not in listval:
+ if listval is not None and v in listval: continue
+ elif listadv is not None and v in listadv: continue
+ else:
raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
- elif __val not in listval:
+ elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
#
return __val
__inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
#~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
__canonic_fromDico_keys = __inverse_fromDico_keys.keys()
+ #
+ for k in __inverse_fromDico_keys.values():
+ if k.lower() in self.__replace_by_the_new_name:
+ __newk = self.__replace_by_the_new_name[k.lower()]
+ __msg = "the parameter '%s' used in '%s' algorithm case is deprecated and has to be replaced by '%s'. Please update your code."%(k,self._name,__newk)
+ warnings.warn(__msg, FutureWarning, stacklevel=50)
+ #
for k in self.__required_parameters.keys():
if k in __canonic_fromDico_keys:
self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
self._parameters[k] = self.setParameterValue(k)
else:
pass
- logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
+ if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
+ logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
+ else:
+ logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
+
def _setInternalState(self, key=None, value=None, fromDico=None, reset=False):
    """
    Store named variables composing the algorithm internal state.

    key, value : a single named variable to store (ignored unless both
                 are not None)
    fromDico   : an optional mapping whose items are merged into the state
                 (default None replaces the previous mutable default ``{}``,
                 which is an unsafe Python idiom even when only read)
    reset      : when True, empty the state dictionary beforehand
    """
    if reset:
        # Start again from a fresh, empty state dictionary
        self.__internal_state = {}
    if key is not None and value is not None:
        self.__internal_state[key] = value
    if fromDico:
        # Merge a shallow copy so the caller's dict object is not shared
        self.__internal_state.update( dict(fromDico) )
+
def _getInternalState(self, key=None):
    "Return one named internal variable, or the full state dictionary"
    __state = self.__internal_state
    # Fall back to the whole dictionary when no (known) key is requested
    if key is None or key not in __state:
        return __state
    return __state[key]
+
def _getTimeState(self, reset=False):
    "Initialize (reset=True) or return the (cpu, elapsed) times in seconds"
    # Sample both clocks once, then either store or difference them
    __cpu_now = time.process_time()
    __elapsed_now = time.perf_counter()
    if reset:
        self.__initial_cpu_time = __cpu_now
        self.__initial_elapsed_time = __elapsed_now
        return 0., 0.
    self.__cpu_time = __cpu_now - self.__initial_cpu_time
    self.__elapsed_time = __elapsed_now - self.__initial_elapsed_time
    return self.__cpu_time, self.__elapsed_time
+
def _StopOnTimeLimit(self, X=None, withReason=False):
    "Stop criteria on time limit: True/False [+ Reason]"
    # X is unused here: it keeps the signature of generic stop callbacks
    __cpu, __elapsed = self._getTimeState()
    __params = self._parameters
    __stop, __reason = False, ""
    if "MaximumCpuTime" in __params and __cpu > __params["MaximumCpuTime"]:
        __stop = True
        __reason = "Reached maximum CPU time (%.1fs > %.1fs)"%(__cpu, __params["MaximumCpuTime"])
    elif "MaximumElapsedTime" in __params and __elapsed > __params["MaximumElapsedTime"]:
        __stop = True
        __reason = "Reached maximum elapsed time (%.1fs > %.1fs)"%(__elapsed, __params["MaximumElapsedTime"])
    if withReason:
        return __stop, __reason
    return __stop
+
+# ==============================================================================
class PartialAlgorithm(object):
    """
    Lightweight stand-in for "Algorithm": it exposes the same storage
    interface but performs none of the advanced checking. The methods
    reproduced here behave exactly as those of the "Algorithm" class.
    """
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        # Minimal set of storable variables, mirroring "Algorithm"
        self.StoredVariables = {
            "Analysis":               Persistence.OneVector(name = "Analysis"),
            "CostFunctionJ":          Persistence.OneScalar(name = "CostFunctionJ"),
            "CostFunctionJb":         Persistence.OneScalar(name = "CostFunctionJb"),
            "CostFunctionJo":         Persistence.OneScalar(name = "CostFunctionJo"),
            "CurrentIterationNumber": Persistence.OneIndex(name = "CurrentIterationNumber"),
            "CurrentStepNumber":      Persistence.OneIndex(name = "CurrentStepNumber"),
            }
        #
        # Case-insensitive lookup table for "get"
        self.__canonical_stored_name = {k.lower(): k for k in self.StoredVariables}

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return the stored variable matching *key* (case-insensitive), or the
        dictionary of every available variable when no key is given. The
        persistence objects themselves are returned, so individual access
        goes through the persistence class methods.
        """
        if key is None:
            return self.StoredVariables
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
# ==============================================================================
class AlgorithmAndParameters(object):
if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
if self.__algorithmName in ["EnsembleBlue",]:
asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
- self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
+ self.__Xb = Persistence.OneVector("Background")
for member in asPersistentVector:
- self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
+ self.__Xb.store( numpy.asarray(member, dtype=float) )
__Xb_shape = min(__B_shape)
else:
raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
%(len(self.__P["Bounds"]),max(__Xb_shape)))
#
+ if ("StateBoundsForQuantiles" in self.__P) \
+ and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
+ and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
+ raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
+ %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
+ #
return 1
# ==============================================================================
#
if __Vector is not None:
self.__is_vector = True
- self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
+ if isinstance(__Vector, str):
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
+ self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
self.shape = self.__V.shape
self.size = self.__V.size
elif __Series is not None:
self.__is_series = True
if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
- self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
- if isinstance(__Series, str): __Series = eval(__Series)
+ self.__V = Persistence.OneVector(self.__name)
+ if isinstance(__Series, str):
+ __Series = PlatformInfo.strmatrix2liststr(__Series)
for member in __Series:
- self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
+ if isinstance(member, str):
+ member = PlatformInfo.strvect2liststr( member )
+ self.__V.store(numpy.asarray( member, dtype=float ))
else:
self.__V = __Series
if isinstance(self.__V.shape, (tuple, list)):
__Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
#
if __Scalar is not None:
- if numpy.matrix(__Scalar).size != 1:
- raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
+ if isinstance(__Scalar, str):
+ __Scalar = PlatformInfo.strvect2liststr( __Scalar )
+ if len(__Scalar) > 0: __Scalar = __Scalar[0]
+ if numpy.array(__Scalar).size != 1:
+ raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
self.__is_scalar = True
self.__C = numpy.abs( float(__Scalar) )
self.shape = (0,0)
self.size = 0
elif __Vector is not None:
+ if isinstance(__Vector, str):
+ __Vector = PlatformInfo.strvect2liststr( __Vector )
self.__is_vector = True
- self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
+ self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
self.shape = (self.__C.size,self.__C.size)
self.size = self.__C.size**2
elif __Matrix is not None:
elif __Object is not None:
self.__is_object = True
self.__C = __Object
- for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
+ for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
if not hasattr(self.__C,at):
raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
if hasattr(self.__C,"shape"):
raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
- L = numpy.linalg.cholesky( self.__C )
+ numpy.linalg.cholesky( self.__C )
except:
raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
try:
- L = self.__C.cholesky()
+ self.__C.cholesky()
except:
raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
def getI(self):
    "Inverse of the covariance, under the same storage form"
    if self.ismatrix():
        __content = numpy.linalg.inv(self.__C)
        return Covariance(self.__name+"I", asCovariance = __content)
    if self.isvector():
        return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C)
    if self.isscalar():
        return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C)
    if self.isobject() and hasattr(self.__C, "getI"):
        return Covariance(self.__name+"I", asCovObject = self.__C.getI())
    # No way to build the inverse: the None return value is required here
    return None
return Covariance(self.__name+"T", asEyeByVector = self.__C )
elif self.isscalar():
return Covariance(self.__name+"T", asEyeByScalar = self.__C )
- elif self.isobject():
+ elif self.isobject() and hasattr(self.__C,"getT"):
return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
+ else:
+ raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
def cholesky(self):
"Décomposition de Cholesky"
return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
elif self.isobject() and hasattr(self.__C,"cholesky"):
return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
+ else:
+ raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
def choleskyI(self):
    "Inverse of the Cholesky factor of the covariance"
    if self.ismatrix():
        __factor = numpy.linalg.cholesky(self.__C)
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(__factor))
    if self.isvector():
        return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ))
    if self.isscalar():
        return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ))
    if self.isobject() and hasattr(self.__C, "choleskyI"):
        return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI())
    raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
+
def sqrtm(self):
    "Matrix square root of the covariance"
    if self.ismatrix():
        import scipy
        # Keep only the real part: sqrtm may return tiny imaginary residues
        __root = numpy.real(scipy.linalg.sqrtm(self.__C))
        return Covariance(self.__name+"C", asCovariance = __root)
    if self.isvector():
        return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ))
    if self.isscalar():
        return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ))
    if self.isobject() and hasattr(self.__C, "sqrtm"):
        return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm())
    raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
+
def sqrtmI(self):
    "Inverse of the matrix square root of the covariance"
    if self.ismatrix():
        import scipy
        # Keep only the real part: sqrtm may return tiny imaginary residues
        __root = numpy.real(scipy.linalg.sqrtm(self.__C))
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(__root))
    if self.isvector():
        return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ))
    if self.isscalar():
        return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ))
    if self.isobject() and hasattr(self.__C, "sqrtmI"):
        return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI())
    raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
def diag(self, msize=None):
"Diagonale de la matrice"
raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
else:
return self.__C * numpy.ones(int(msize))
- elif self.isobject():
+ elif self.isobject() and hasattr(self.__C,"diag"):
return self.__C.diag()
-
- def asfullmatrix(self, msize=None):
- "Matrice pleine"
- if self.ismatrix():
- return self.__C
- elif self.isvector():
- return numpy.matrix( numpy.diag(self.__C), float )
- elif self.isscalar():
- if msize is None:
- raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
- else:
- return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
- elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
- return self.__C.asfullmatrix()
+ else:
+ raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
def trace(self, msize=None):
"Trace de la matrice"
return self.__C * int(msize)
elif self.isobject():
return self.__C.trace()
+ else:
+ raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
+
def asfullmatrix(self, msize=None):
    "Dense float array representation of the covariance"
    if self.ismatrix():
        return numpy.asarray(self.__C, dtype=float)
    if self.isvector():
        # The stored vector is the diagonal of the full matrix
        return numpy.asarray(numpy.diag(self.__C), dtype=float)
    if self.isscalar():
        if msize is None:
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
        return numpy.asarray(self.__C * numpy.eye(int(msize)), dtype=float)
    if self.isobject() and hasattr(self.__C, "asfullmatrix"):
        return self.__C.asfullmatrix()
    raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
+
def assparsematrix(self):
    "Sparse (i.e. internal, unexpanded) representation of the covariance"
    __content = self.__C
    return __content
def getO(self):
    "Return the covariance object itself"
    return self
return self.__C + numpy.asmatrix(other)
elif self.isvector() or self.isscalar():
_A = numpy.asarray(other)
- _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
+ if len(_A.shape) == 1:
+ _A.reshape((-1,1))[::2] += self.__C
+ else:
+ _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
return numpy.asmatrix(_A)
def __radd__(self, other):
"x.__neg__() <==> -x"
return - self.__C
def __matmul__(self, other):
    "x.__matmul__(y) <==> x@y"
    __seq_types = (list, numpy.matrix, numpy.ndarray, tuple)
    if self.ismatrix() and isinstance(other, (int, float)):
        return numpy.asarray(self.__C) * other
    if self.ismatrix() and isinstance(other, __seq_types):
        if numpy.ravel(other).size == self.shape[1]:
            # Vector operand
            return numpy.ravel(self.__C @ numpy.ravel(other))
        if numpy.asarray(other).shape[0] == self.shape[1]:
            # Matrix operand
            return numpy.asarray(self.__C) @ numpy.asarray(other)
        raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
    if self.isvector() and isinstance(other, __seq_types):
        if numpy.ravel(other).size == self.shape[1]:
            # Vector operand: diagonal acts componentwise
            return numpy.ravel(self.__C) * numpy.ravel(other)
        if numpy.asarray(other).shape[0] == self.shape[1]:
            # Matrix operand: diagonal scales each row
            return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
        raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
    if self.isscalar() and isinstance(other, numpy.matrix):
        return numpy.asarray(self.__C * other)
    if self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        __oshape = numpy.asarray(other).shape
        if len(__oshape) == 1 or __oshape[1] == 1 or __oshape[0] == 1:
            return self.__C * numpy.ravel(other)
        return self.__C * numpy.asarray(other)
    if self.isobject():
        return self.__C.__matmul__(other)
    raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
+
def __mul__(self, other):
"x.__mul__(y) <==> x*y"
if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
else:
raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmatmul__(self, other):
    "x.__rmatmul__(y) <==> y@x"
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    if self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]:
            # Vector operand, applied as a row vector on the left
            return numpy.asmatrix(numpy.ravel(other)) * self.__C
        if numpy.asmatrix(other).shape[0] == self.shape[1]:
            # Matrix operand
            return numpy.asmatrix(other) * self.__C
        raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
    if self.isvector() and isinstance(other, numpy.matrix):
        if numpy.ravel(other).size == self.shape[0]:
            # Vector operand: diagonal acts componentwise
            return numpy.asmatrix(numpy.ravel(other) * self.__C)
        if numpy.asmatrix(other).shape[1] == self.shape[0]:
            # Matrix operand: diagonal scales each column
            return numpy.asmatrix(numpy.array(other) * self.__C)
        raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
    if self.isscalar() and isinstance(other, numpy.matrix):
        return other * self.__C
    if self.isobject():
        return self.__C.__rmatmul__(other)
    raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+
def __rmul__(self, other):
"x.__rmul__(y) <==> y*x"
if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
elif self.isscalar() and isinstance(other,numpy.matrix):
return other * self.__C
+ elif self.isscalar() and isinstance(other,float):
+ return other * self.__C
elif self.isobject():
return self.__C.__rmul__(other)
else:
# ==============================================================================
class Observer2Func(object):
"""
- Creation d'une fonction d'observateur a partir de son texte
+ Création d'une fonction d'observateur a partir de son texte
"""
def __init__(self, corps=""):
self.__corps = corps
# ==============================================================================
class CaseLogger(object):
"""
- Conservation des commandes de creation d'un cas
+ Conservation des commandes de création d'un cas
"""
def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
self.__name = str(__name)
"TUI" :Interfaces._TUIViewer,
"SCD" :Interfaces._SCDViewer,
"YACS":Interfaces._YACSViewer,
+ "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
+ "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
+ "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
}
self.__loaders = {
"TUI" :Interfaces._TUIViewer,
#
# Calculs effectifs
if __mpEnabled:
- _jobs = []
- if _extraArguments is None:
- _jobs = __xserie
- elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
- for __xvalue in __xserie:
- _jobs.append( [__xvalue, ] + list(_extraArguments) )
- else:
- raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
+ _jobs = __xserie
# logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
- import multiprocessing
with multiprocessing.Pool(__mpWorkers) as pool:
__multiHX = pool.map( _sFunction, _jobs )
pool.close()
# logging.debug("MULTF Internal multifonction calculations end")
return __multiHX
-# ==============================================================================
-def CostFunction3D(_x,
- _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
- _HmX = None, # Simulation déjà faite de Hm(x)
- _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
- _BI = None,
- _RI = None,
- _Xb = None,
- _Y = None,
- _SIV = False, # A résorber pour la 8.0
- _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
- _nPS = 0, # nbPreviousSteps
- _QM = "DA", # QualityMeasure
- _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
- _fRt = False, # Restitue ou pas la sortie étendue
- _sSc = True, # Stocke ou pas les SSC
- ):
- """
- Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
- et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
- DFO, QuantileRegression
- """
- if not _sSc:
- _SIV = False
- _SSC = {}
- else:
- for k in ["CostFunctionJ",
- "CostFunctionJb",
- "CostFunctionJo",
- "CurrentOptimum",
- "CurrentState",
- "IndexOfOptimum",
- "SimulatedObservationAtCurrentOptimum",
- "SimulatedObservationAtCurrentState",
- ]:
- if k not in _SSV:
- _SSV[k] = []
- if hasattr(_SSV[k],"store"):
- _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
- #
- _X = numpy.asmatrix(numpy.ravel( _x )).T
- if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
- _SSV["CurrentState"].append( _X )
- #
- if _HmX is not None:
- _HX = _HmX
- else:
- if _Hm is None:
- raise ValueError("COSTFUNCTION3D Operator has to be defined.")
- if _arg is None:
- _HX = _Hm( _X )
- else:
- _HX = _Hm( _X, *_arg )
- _HX = numpy.asmatrix(numpy.ravel( _HX )).T
- #
- if "SimulatedObservationAtCurrentState" in _SSC or \
- "SimulatedObservationAtCurrentOptimum" in _SSC:
- _SSV["SimulatedObservationAtCurrentState"].append( _HX )
- #
- if numpy.any(numpy.isnan(_HX)):
- Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
- else:
- _Y = numpy.asmatrix(numpy.ravel( _Y )).T
- if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
- if _BI is None or _RI is None:
- raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
- _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
- Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
- Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
- elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
- if _RI is None:
- raise ValueError("Observation error covariance matrix has to be properly defined!")
- Jb = 0.
- Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
- elif _QM in ["LeastSquares", "LS", "L2"]:
- Jb = 0.
- Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
- elif _QM in ["AbsoluteValue", "L1"]:
- Jb = 0.
- Jo = numpy.sum( numpy.abs(_Y - _HX) )
- elif _QM in ["MaximumError", "ME"]:
- Jb = 0.
- Jo = numpy.max( numpy.abs(_Y - _HX) )
- elif _QM in ["QR", "Null"]:
- Jb = 0.
- Jo = 0.
- else:
- raise ValueError("Unknown asked quality measure!")
- #
- J = float( Jb ) + float( Jo )
- #
- if _sSc:
- _SSV["CostFunctionJb"].append( Jb )
- _SSV["CostFunctionJo"].append( Jo )
- _SSV["CostFunctionJ" ].append( J )
- #
- if "IndexOfOptimum" in _SSC or \
- "CurrentOptimum" in _SSC or \
- "SimulatedObservationAtCurrentOptimum" in _SSC:
- IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
- if "IndexOfOptimum" in _SSC:
- _SSV["IndexOfOptimum"].append( IndexMin )
- if "CurrentOptimum" in _SSC:
- _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
- if "SimulatedObservationAtCurrentOptimum" in _SSC:
- _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
- #
- if _fRt:
- return _SSV
- else:
- if _QM in ["QR"]: # Pour le QuantileRegression
- return _HX
- else:
- return J
-
# ==============================================================================
if __name__ == "__main__":
print('\n AUTODIAGNOSTIC\n')