X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FBasicObjects.py;h=eefbea3806812c12230658e7ad229270ecbf40a7;hb=a6845547d27bcdc0928f898a4c8a2e4fc276c69e;hp=5a08802b8e225b9319db179931f25f4babce8f0c;hpb=7672546d765b288764e7bcc785340c66322998bb;p=modules%2Fadao.git diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 5a08802..eefbea3 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2008-2019 EDF R&D +# Copyright (C) 2008-2023 EDF R&D # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -30,7 +30,9 @@ import os import sys import logging import copy +import time import numpy +import warnings from functools import partial from daCore import Persistence, PlatformInfo, Interfaces from daCore import Templates @@ -42,58 +44,72 @@ class CacheManager(object): """ def __init__(self, toleranceInRedundancy = 1.e-18, - lenghtOfRedundancy = -1, + lengthOfRedundancy = -1, ): """ - Les caractéristiques de tolérance peuvent être modifées à la création. + Les caractéristiques de tolérance peuvent être modifiées à la création. """ - self.__tolerBP = float(toleranceInRedundancy) - self.__lenghtOR = int(lenghtOfRedundancy) - self.__initlnOR = self.__lenghtOR + self.__tolerBP = float(toleranceInRedundancy) + self.__lengthOR = int(lengthOfRedundancy) + self.__initlnOR = self.__lengthOR + self.__seenNames = [] + self.__enabled = True self.clearCache() def clearCache(self): "Vide le cache" - self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms - # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP) + self.__listOPCV = [] + self.__seenNames = [] - def wasCalculatedIn(self, xValue ): #, info="" ): + def wasCalculatedIn(self, xValue, oName="" ): "Vérifie l'existence d'un calcul correspondant à la valeur" __alc = False __HxV = None - for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1): - if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size): - # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape) - continue - if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]: - __alc = True - __HxV = self.__listOPCV[i][1] - # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i) - break + if self.__enabled: + for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1): + if not hasattr(xValue, 'size'): + pass + elif (str(oName) != self.__listOPCV[i][3]): + pass + elif (xValue.size != self.__listOPCV[i][0].size): + pass + elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size): + pass + elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]): + __alc = True + __HxV = self.__listOPCV[i][1] + break return __alc, __HxV - def storeValueInX(self, xValue, HxValue ): - "Stocke un calcul correspondant à la valeur" - if self.__lenghtOR < 0: - self.__lenghtOR = 2 * xValue.size + 2 - self.__initlnOR = self.__lenghtOR - while len(self.__listOPCV) > self.__lenghtOR: - # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR) + def storeValueInX(self, 
xValue, HxValue, oName="" ): + "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x" + if self.__lengthOR < 0: + self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2 + self.__initlnOR = self.__lengthOR + self.__seenNames.append(str(oName)) + if str(oName) not in self.__seenNames: # Etend la liste si nouveau + self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2 + self.__initlnOR += self.__lengthOR + self.__seenNames.append(str(oName)) + while len(self.__listOPCV) > self.__lengthOR: self.__listOPCV.pop(0) self.__listOPCV.append( ( - copy.copy(numpy.ravel(xValue)), - copy.copy(HxValue), - numpy.linalg.norm(xValue), + copy.copy(numpy.ravel(xValue)), # 0 Previous point + copy.copy(HxValue), # 1 Previous value + numpy.linalg.norm(xValue), # 2 Norm + str(oName), # 3 Operator name ) ) def disable(self): "Inactive le cache" - self.__initlnOR = self.__lenghtOR - self.__lenghtOR = 0 + self.__initlnOR = self.__lengthOR + self.__lengthOR = 0 + self.__enabled = False def enable(self): "Active le cache" - self.__lenghtOR = self.__initlnOR + self.__lengthOR = self.__initlnOR + self.__enabled = True # ============================================================================== class Operator(object): @@ -106,9 +122,11 @@ class Operator(object): CM = CacheManager() # def __init__(self, + name = "GenericOperator", fromMethod = None, fromMatrix = None, avoidingRedundancy = True, + reducingMemoryUse = False, inputAsMultiFunction = False, enableMultiProcess = False, extraArguments = None, @@ -118,16 +136,21 @@ class Operator(object): deux mots-clé, soit une fonction ou un multi-fonction python, soit une matrice. Arguments : + - name : nom d'opérateur - fromMethod : argument de type fonction Python - - fromMatrix : argument adapté au constructeur numpy.matrix + - fromMatrix : argument adapté au constructeur numpy.array/matrix - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants + - reducingMemoryUse : booléen forçant (ou pas) des calculs moins + gourmands en mémoire - inputAsMultiFunction : booléen indiquant une fonction explicitement définie (ou pas) en multi-fonction - extraArguments : arguments supplémentaires passés à la fonction de base et ses dérivées (tuple ou dictionnaire) """ + self.__name = str(name) self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0 - self.__AvoidRC = bool( avoidingRedundancy ) + self.__reduceM = bool( reducingMemoryUse ) + self.__avoidRC = bool( avoidingRedundancy ) self.__inputAsMF = bool( inputAsMultiFunction ) self.__mpEnabled = bool( enableMultiProcess ) self.__extraArgs = extraArguments @@ -141,7 +164,9 @@ class Operator(object): self.__Type = "Method" elif fromMatrix is not None: self.__Method = None - self.__Matrix = numpy.matrix( fromMatrix, numpy.float ) + if isinstance(fromMatrix, str): + fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix ) + self.__Matrix = numpy.asarray( fromMatrix, dtype=float ) self.__Type = "Matrix" else: self.__Method = None @@ -154,7 +179,7 @@ class Operator(object): def enableAvoidingRedundancy(self): "Active le cache" - if self.__AvoidRC: + if self.__avoidRC: Operator.CM.enable() else: Operator.CM.disable() @@ -163,7 +188,7 @@ class Operator(object): "Renvoie le type" return self.__Type - def appliedTo(self, xValue, HValue = None, argsAsSerie = False): + def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à une série d'arguments xValue. 
Cette méthode se contente d'appliquer, chaque @@ -187,18 +212,18 @@ class Operator(object): # if _HValue is not None: assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue" - HxValue = [] + _HxValue = [] for i in range(len(_HValue)): - HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T ) - if self.__AvoidRC: - Operator.CM.storeValueInX(_xValue[i],HxValue[-1]) + _HxValue.append( _HValue[i] ) + if self.__avoidRC: + Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name) else: - HxValue = [] + _HxValue = [] _xserie = [] _hindex = [] for i, xv in enumerate(_xValue): - if self.__AvoidRC: - __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv) + if self.__avoidRC: + __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name) else: __alreadyCalculated = False # @@ -208,13 +233,13 @@ class Operator(object): else: if self.__Matrix is not None: self.__addOneMatrixCall() - _hv = self.__Matrix * xv + _hv = self.__Matrix @ numpy.ravel(xv) else: self.__addOneMethodCall() _xserie.append( xv ) _hindex.append( i ) _hv = None - HxValue.append( _hv ) + _HxValue.append( _hv ) # if len(_xserie)>0 and self.__Matrix is None: if self.__extraArgs is None: @@ -222,18 +247,25 @@ class Operator(object): else: _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF if not hasattr(_hserie, "pop"): - raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.") + raise TypeError( + "The user input multi-function doesn't seem to return a"+\ + " result sequence, behaving like a mono-function. It has"+\ + " to be checked." + ) for i in _hindex: _xv = _xserie.pop(0) _hv = _hserie.pop(0) - HxValue[i] = _hv - if self.__AvoidRC: - Operator.CM.storeValueInX(_xv,_hv) + _HxValue[i] = _hv + if self.__avoidRC: + Operator.CM.storeValueInX(_xv,_hv,self.__name) + # + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if argsAsSerie: return _HxValue + else: return _HxValue[-1] - def appliedControledFormTo(self, paires, argsAsSerie = False): + def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à des paires (xValue, uValue). 
Cette méthode se contente d'appliquer, son @@ -250,30 +282,32 @@ class Operator(object): PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" ) # if self.__Matrix is not None: - HxValue = [] + _HxValue = [] for paire in _xuValue: _xValue, _uValue = paire self.__addOneMatrixCall() - HxValue.append( self.__Matrix * _xValue ) + _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) ) else: - HxValue = [] + _xuArgs = [] for paire in _xuValue: - _xuValue = [] _xValue, _uValue = paire if _uValue is not None: - _xuValue.append( paire ) + _xuArgs.append( paire ) else: - _xuValue.append( _xValue ) - self.__addOneMethodCall( len(_xuValue) ) + _xuArgs.append( _xValue ) + self.__addOneMethodCall( len(_xuArgs) ) if self.__extraArgs is None: - HxValue = self.__Method( _xuValue ) # Calcul MF + _HxValue = self.__Method( _xuArgs ) # Calcul MF else: - HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF + _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF + # + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if argsAsSerie: return _HxValue + else: return _HxValue[-1] - def appliedInXTo(self, paires, argsAsSerie = False): + def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à une série d'arguments xValue, sachant que l'opérateur est valable en @@ -294,20 +328,23 @@ class Operator(object): PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" ) # if self.__Matrix is not None: - HxValue = [] + _HxValue = [] for paire in _nxValue: _xNominal, _xValue = paire self.__addOneMatrixCall() - HxValue.append( self.__Matrix * _xValue ) + _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) ) else: self.__addOneMethodCall( len(_nxValue) ) if self.__extraArgs is None: - HxValue = self.__Method( _nxValue ) # Calcul MF + _HxValue = self.__Method( _nxValue ) # Calcul MF else: - HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF + _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) + # + if argsAsSerie: return _HxValue + else: return _HxValue[-1] def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False): """ @@ -316,12 +353,12 @@ class Operator(object): if self.__Matrix is not None: self.__addOneMatrixCall() mValue = [self.__Matrix,] - elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None" + elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None" mValue = [] if argsAsSerie: self.__addOneMethodCall( len(ValueForMethodForm) ) for _vfmf in ValueForMethodForm: - mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) ) + mValue.append( self.__Method(((_vfmf, None),)) ) else: self.__addOneMethodCall() mValue = self.__Method(((ValueForMethodForm, None),)) @@ -388,7 +425,7 @@ class FullOperator(object): asDict = None, # Parameters appliedInX = None, extraArguments = None, - avoidRC = True, + performancePrf = None, inputAsMF = False,# Fonction(s) as Multi-Functions scheduledBy = None, toBeChecked = False, @@ -415,6 +452,15 @@ class FullOperator(object): __Parameters["EnableMultiProcessingInEvaluation"] = False if "withIncrement" in __Parameters: # 
Temporaire __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"] + # Le défaut est équivalent à "ReducedOverallRequirements" + __reduceM, __avoidRC = True, True + if performancePrf is not None: + if performancePrf == "ReducedAmountOfCalculation": + __reduceM, __avoidRC = False, True + elif performancePrf == "ReducedMemoryFootprint": + __reduceM, __avoidRC = True, False + elif performancePrf == "NoSavings": + __reduceM, __avoidRC = False, False # if asScript is not None: __Matrix, __Function = None, None @@ -454,19 +500,16 @@ class FullOperator(object): __Function = asThreeFunctions __Function.update({"useApproximatedDerivatives":True}) else: - raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")") + raise ValueError( + "The functions has to be given in a dictionnary which have either"+\ + " 1 key (\"Direct\") or"+\ + " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")") if "Direct" not in asThreeFunctions: __Function["Direct"] = asThreeFunctions["Tangent"] __Function.update(__Parameters) else: __Function = None # - # if sys.version_info[0] < 3 and isinstance(__Function, dict): - #  for k in ("Direct", "Tangent", "Adjoint"): - #  if k in __Function and hasattr(__Function[k],"__class__"): - #  if type(__Function[k]) is type(self.__init__): - #  raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k)) - # if appliedInX is not None and isinstance(appliedInX, dict): __appliedInX = appliedInX elif appliedInX is not None: @@ -483,53 +526,112 @@ class FullOperator(object): if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01 if "withdX" not in __Function: __Function["withdX"] = None - if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC + if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM + if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18 - if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1 + if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF from daCore import NumericObjects FDA = NumericObjects.FDApproximation( + name = self.__name, Function = __Function["Direct"], centeredDF = __Function["CenteredFiniteDifference"], increment = __Function["DifferentialIncrement"], dX = __Function["withdX"], + extraArguments = self.__extraArgs, + reducingMemoryUse = __Function["withReducingMemoryUse"], avoidingRedundancy = __Function["withAvoidingRedundancy"], toleranceInRedundancy = __Function["withToleranceInRedundancy"], - lenghtOfRedundancy = __Function["withLenghtOfRedundancy"], + lengthOfRedundancy = __Function["withLengthOfRedundancy"], mpEnabled = __Function["EnableMultiProcessingInDerivatives"], mpWorkers = __Function["NumberOfProcesses"], mfEnabled = __Function["withmfEnabled"], ) - self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction 
= inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) - self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) + self.__FO["Direct"] = Operator( + name = self.__name, + fromMethod = FDA.DirectOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMethod = FDA.TangentOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMethod = FDA.AdjointOperator, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) elif isinstance(__Function, dict) and \ ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \ (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None): - self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) - self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs ) + self.__FO["Direct"] = Operator( + name = self.__name, + fromMethod = __Function["Direct"], + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMethod = __Function["Tangent"], + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMethod = __Function["Adjoint"], + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + extraArguments = self.__extraArgs ) elif asMatrix is not None: - __matrice = numpy.matrix( __Matrix, numpy.float ) - self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) - self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF ) - self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF ) + if isinstance(__Matrix, str): + __Matrix = PlatformInfo.strmatrix2liststr( __Matrix ) + __matrice = numpy.asarray( __Matrix, dtype=float ) + 
self.__FO["Direct"] = Operator( + name = self.__name, + fromMatrix = __matrice, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF, + enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] ) + self.__FO["Tangent"] = Operator( + name = self.__name+"Tangent", + fromMatrix = __matrice, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF ) + self.__FO["Adjoint"] = Operator( + name = self.__name+"Adjoint", + fromMatrix = __matrice.T, + reducingMemoryUse = __reduceM, + avoidingRedundancy = __avoidRC, + inputAsMultiFunction = inputAsMF ) del __matrice else: - raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name) + raise ValueError( + "The %s object is improperly defined or undefined,"%self.__name+\ + " it requires at minima either a matrix, a Direct operator for"+\ + " approximate derivatives or a Tangent/Adjoint operators pair."+\ + " Please check your operator input.") # if __appliedInX is not None: self.__FO["AppliedInX"] = {} - for key in list(__appliedInX.keys()): - if type( __appliedInX[key] ) is type( numpy.matrix([]) ): - # Pour le cas où l'on a une vraie matrice - self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T - elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1: - # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions - self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T - else: - self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T + for key in __appliedInX: + if isinstance(__appliedInX[key], str): + __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] ) + self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1)) else: self.__FO["AppliedInX"] = None @@ -575,8 +677,15 @@ class Algorithm(object): - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations - CostFunctionJo : partie observations de la fonction-coût : Jo - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations + - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0 - CurrentOptimum : état optimal courant lors d'itérations - CurrentState : état courant lors d'itérations + - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0 + - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice + - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice + - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice + - ForecastCovariance : covariance de l'état prédit courant lors d'itérations + - ForecastState : état prédit courant lors d'itérations - GradientOfCostFunctionJ : gradient de la fonction-coût globale - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût @@ -590,8 +699,8 @@ class Algorithm(object): - MahalanobisConsistency : indicateur de consistance des covariances - OMA : Observation moins Analyse : Y - Xa - OMB : Observation moins Background : Y 
- Xb - - PredictedState : état prédit courant lors d'itérations - Residu : dans le cas des algorithmes de vérification + - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche - SigmaObs2 : indicateur de correction optimale des erreurs d'observation - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche @@ -607,11 +716,16 @@ class Algorithm(object): # self._name = str( name ) self._parameters = {"StoreSupplementaryCalculations":[]} + self.__internal_state = {} self.__required_parameters = {} - self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}} + self.__required_inputs = { + "RequiredInputValues":{"mandatory":(), "optional":()}, + "ClassificationTags":[], + } self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters self.__canonical_parameter_name = {} # Correspondance "lower"->"correct" self.__canonical_stored_name = {} # Correspondance "lower"->"correct" + self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien # self.StoredVariables = {} self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations") @@ -626,12 +740,20 @@ class Algorithm(object): self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum") self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo") self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum") + self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState") + self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber") self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum") self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState") + self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber") + self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations") + self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots") + self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates") + self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance") + self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState") self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ") self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb") self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo") - self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum") + self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum") self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation") self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis") self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState") @@ -642,8 +764,11 @@ class Algorithm(object): self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency") 
self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA") self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB") - self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState") + self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints") + self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis") self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu") + self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus") + self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles") self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2") self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2") self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground") @@ -661,66 +786,122 @@ class Algorithm(object): self.__canonical_parameter_name["algorithm"] = "Algorithm" self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations" - def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ): + def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ): "Pré-calcul" logging.debug("%s Lancement", self._name) logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio"))) + self._getTimeState(reset=True) # - # Mise a jour des paramètres internes avec le contenu de Parameters, en + # Mise à jour des paramètres internes avec le contenu de Parameters, en # reprenant les valeurs par défauts pour toutes celles non définies self.__setParameters(Parameters, reset=True) for k, v in self.__variable_names_not_public.items(): if k not in self._parameters: self.__setParameters( {k:v} ) - # - # Corrections et compléments - def __test_vvalue(argument, variable, argname): + + # Corrections et compléments des vecteurs + def __test_vvalue(argument, variable, argname, symbol=None): + if symbol is None: symbol = variable if argument is None: if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: - raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable)) + raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol)) elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: - logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable)) + logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol)) else: - logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable)) + logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol)) else: - logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size)) + if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: + logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) + elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: + logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size)) + else: + logging.debug( + "%s %s vector %s is set although neither required nor optional, and its size is %i."%( + 
self._name,argname,symbol,numpy.array(argument).size)) return 0 __test_vvalue( Xb, "Xb", "Background or initial state" ) __test_vvalue( Y, "Y", "Observation" ) - # - def __test_cvalue(argument, variable, argname): + __test_vvalue( U, "U", "Control" ) + + # Corrections et compléments des covariances + def __test_cvalue(argument, variable, argname, symbol=None): + if symbol is None: symbol = variable if argument is None: if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: - raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable)) + raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol)) elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: - logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable)) + logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol)) else: - logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable)) + logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol)) else: - logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable)) + if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: + logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol)) + elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: + logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol)) + else: + logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol)) return 0 - __test_cvalue( R, "R", "Observation" ) __test_cvalue( B, "B", "Background" ) + __test_cvalue( R, "R", "Observation" ) __test_cvalue( Q, "Q", "Evolution" ) + + # Corrections et compléments des opérateurs + def __test_ovalue(argument, variable, argname, symbol=None): + if symbol is None: symbol = variable + if argument is None or (isinstance(argument,dict) and len(argument)==0): + if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: + raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol)) + elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: + logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol)) + else: + logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol)) + else: + if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]: + logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol)) + elif variable in self.__required_inputs["RequiredInputValues"]["optional"]: + logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol)) + else: + logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol)) + return 0 + __test_ovalue( HO, "HO", "Observation", "H" ) + __test_ovalue( EM, "EM", "Evolution", "M" ) + __test_ovalue( CM, "CM", "Control Model", "C" ) # + # Corrections et compléments des bornes if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0): - logging.debug("%s Prise en compte des bornes 
effectuee"%(self._name,)) + logging.debug("%s Bounds taken into account"%(self._name,)) else: self._parameters["Bounds"] = None + if ("StateBoundsForQuantiles" in self._parameters) \ + and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \ + and (len(self._parameters["StateBoundsForQuantiles"]) > 0): + logging.debug("%s Bounds for quantiles states taken into account"%(self._name,)) + # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes + # + # Corrections et compléments de l'initialisation en X + if "InitializationPoint" in self._parameters: + if Xb is not None: + if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'): + if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size: + raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \ + %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size)) + # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"]) + else: + self._parameters["InitializationPoint"] = numpy.ravel(Xb) + else: + if self._parameters["InitializationPoint"] is None: + raise ValueError("Forced initial point can not be set without any given Background or required value") + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC": + self.setParameterValue("StoreInternalVariables",True) # + # Verbosité et logging if logging.getLogger().level < logging.WARNING: self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1 - if PlatformInfo.has_scipy: - import scipy.optimize - self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL - else: - self._parameters["optmessages"] = 15 + self._parameters["optmessages"] = 15 else: self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0 - if PlatformInfo.has_scipy: - import scipy.optimize - self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE - else: - self._parameters["optmessages"] = 15 + self._parameters["optmessages"] = 0 # return 0 @@ -737,10 +918,15 @@ class Algorithm(object): _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A))) _C = numpy.dot(_EI, numpy.dot(_A, _EI)) self.StoredVariables["APosterioriCorrelations"].store( _C ) - if _oH is not None: - logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0)) - logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3)) + if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH: + logging.debug( + "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", + self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0)) + logging.debug( + "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", + self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3)) logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio")) + logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1]) logging.debug("%s Terminé", self._name) return 
0 @@ -782,22 +968,31 @@ class Algorithm(object): else: try: msg = "'%s'"%k - except: + except Exception: raise TypeError("pop expected at least 1 arguments, got 0") "If key is not found, d is returned if given, otherwise KeyError is raised" try: return d - except: + except Exception: raise KeyError(msg) def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None): """ - Doit implémenter l'opération élémentaire de calcul d'assimilation sous - sa forme mathématique la plus naturelle possible. + Doit implémenter l'opération élémentaire de calcul algorithmique. """ - raise NotImplementedError("Mathematical assimilation calculation has not been implemented!") - - def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None): + raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!") + + def defineRequiredParameter(self, + name = None, + default = None, + typecast = None, + message = None, + minval = None, + maxval = None, + listval = None, + listadv = None, + oldname = None, + ): """ Permet de définir dans l'algorithme des paramètres requis et leurs caractéristiques par défaut. @@ -811,9 +1006,14 @@ class Algorithm(object): "minval" : minval, "maxval" : maxval, "listval" : listval, + "listadv" : listadv, "message" : message, + "oldname" : oldname, } self.__canonical_parameter_name[name.lower()] = name + if oldname is not None: + self.__canonical_parameter_name[oldname.lower()] = name # Conversion + self.__replace_by_the_new_name[oldname.lower()] = name logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name)) def getRequiredParameters(self, noDetails=True): @@ -836,6 +1036,7 @@ class Algorithm(object): minval = self.__required_parameters[__k]["minval"] maxval = self.__required_parameters[__k]["maxval"] listval = self.__required_parameters[__k]["listval"] + listadv = self.__required_parameters[__k]["listadv"] # if value is None and default is None: __val = None @@ -847,30 +1048,46 @@ class Algorithm(object): else: try: __val = typecast( value ) - except: + except Exception: raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast)) # if minval is not None and (numpy.array(__val, float) < minval).any(): raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval)) if maxval is not None and (numpy.array(__val, float) > maxval).any(): raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval)) - if listval is not None: + if listval is not None or listadv is not None: if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple): for v in __val: - if v not in listval: + if listval is not None and v in listval: continue + elif listadv is not None and v in listadv: continue + else: raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval)) - elif __val not in listval: + elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv): raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval)) # return __val def requireInputArguments(self, mandatory=(), optional=()): """ - Permet d'imposer des arguments requises en entrée + Permet d'imposer des arguments 
de calcul requis en entrée. """ self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory ) self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional ) + def getInputArguments(self): + """ + Permet d'obtenir les listes des arguments de calcul requis en entrée. + """ + return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"] + + def setAttributes(self, tags=()): + """ + Permet d'adjoindre des attributs comme les tags de classification. + Renvoie la liste actuelle dans tous les cas. + """ + self.__required_inputs["ClassificationTags"].extend( tags ) + return self.__required_inputs["ClassificationTags"] + def __setParameters(self, fromDico={}, reset=False): """ Permet de stocker les paramètres reçus dans le dictionnaire interne. @@ -882,6 +1099,14 @@ class Algorithm(object): __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()]) __canonic_fromDico_keys = __inverse_fromDico_keys.keys() + # + for k in __inverse_fromDico_keys.values(): + if k.lower() in self.__replace_by_the_new_name: + __newk = self.__replace_by_the_new_name[k.lower()] + __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk) + __msg += " Please update your code." + warnings.warn(__msg, FutureWarning, stacklevel=50) + # for k in self.__required_parameters.keys(): if k in __canonic_fromDico_keys: self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]]) @@ -889,7 +1114,96 @@ class Algorithm(object): self._parameters[k] = self.setParameterValue(k) else: pass - logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k]) + if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100: + logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k])) + else: + logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k]) + + def _setInternalState(self, key=None, value=None, fromDico={}, reset=False): + """ + Permet de stocker des variables nommées constituant l'état interne + """ + if reset: # Vide le dictionnaire préalablement + self.__internal_state = {} + if key is not None and value is not None: + self.__internal_state[key] = value + self.__internal_state.update( dict(fromDico) ) + + def _getInternalState(self, key=None): + """ + Restitue un état interne sous la forme d'un dictionnaire de variables nommées + """ + if key is not None and key in self.__internal_state: + return self.__internal_state[key] + else: + return self.__internal_state + + def _getTimeState(self, reset=False): + """ + Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde + """ + if reset: + self.__initial_cpu_time = time.process_time() + self.__initial_elapsed_time = time.perf_counter() + return 0., 0. 
+ else: + self.__cpu_time = time.process_time() - self.__initial_cpu_time + self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time + return self.__cpu_time, self.__elapsed_time + + def _StopOnTimeLimit(self, X=None, withReason=False): + "Stop criteria on time limit: True/False [+ Reason]" + c, e = self._getTimeState() + if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]: + __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"]) + elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]: + __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"]) + else: + __SC, __SR = False, "" + if withReason: + return __SC, __SR + else: + return __SC + +# ============================================================================== +class PartialAlgorithm(object): + """ + Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune + action avancée comme la vérification . Pour les méthodes reprises ici, + le fonctionnement est identique à celles de la classe "Algorithm". + """ + def __init__(self, name): + self._name = str( name ) + self._parameters = {"StoreSupplementaryCalculations":[]} + # + self.StoredVariables = {} + self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis") + self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ") + self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb") + self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo") + self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber") + self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber") + # + self.__canonical_stored_name = {} + for k in self.StoredVariables: + self.__canonical_stored_name[k.lower()] = k + + def _toStore(self, key): + "True if in StoreSupplementaryCalculations, else False" + return key in self._parameters["StoreSupplementaryCalculations"] + + def get(self, key=None): + """ + Renvoie l'une des variables stockées identifiée par la clé, ou le + dictionnaire de l'ensemble des variables disponibles en l'absence de + clé. Ce sont directement les variables sous forme objet qui sont + renvoyées, donc les méthodes d'accès à l'objet individuel sont celles + des classes de persistance. 
+ """ + if key is not None: + return self.StoredVariables[self.__canonical_stored_name[key.lower()]] + else: + return self.StoredVariables # ============================================================================== class AlgorithmAndParameters(object): @@ -931,7 +1245,7 @@ class AlgorithmAndParameters(object): asDict = None, asScript = None, ): - "Mise a jour des parametres" + "Mise à jour des paramètres" if asDict is None and asScript is not None: __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" ) else: @@ -1004,7 +1318,7 @@ class AlgorithmAndParameters(object): try: catalogAd = r.loadCatalog("proc", __file) r.addCatalog(catalogAd) - except: + except Exception: pass try: @@ -1053,6 +1367,14 @@ class AlgorithmAndParameters(object): "Renvoie la liste des paramètres requis selon l'algorithme" return self.__algorithm.getRequiredParameters(noDetails) + def getAlgorithmInputArguments(self): + "Renvoie la liste des entrées requises selon l'algorithme" + return self.__algorithm.getInputArguments() + + def getAlgorithmAttributes(self): + "Renvoie la liste des attributs selon l'algorithme" + return self.__algorithm.setAttributes() + def setObserver(self, __V, __O, __I, __S): if self.__algorithm is None \ or isinstance(self.__algorithm, dict) \ @@ -1126,7 +1448,8 @@ class AlgorithmAndParameters(object): if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')): module_path = os.path.abspath(os.path.join(directory, daDirectory)) if module_path is None: - raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path)) + raise ImportError( + "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path)) # # Importe le fichier complet comme un module # ------------------------------------------ @@ -1138,7 +1461,8 @@ class AlgorithmAndParameters(object): self.__algorithmName = str(choice) sys.path = sys_path_tmp ; del sys_path_tmp except ImportError as e: - raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e)) + raise ImportError( + "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e)) # # Instancie un objet du type élémentaire du fichier # ------------------------------------------------- @@ -1227,32 +1551,48 @@ class AlgorithmAndParameters(object): raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,)) # if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ): - raise ValueError("Shape 
characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape)) if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ): - raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape)) + raise ValueError( + "Shape characteristic of observation operator (H)"+\ + " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape)) # if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ): if self.__algorithmName in ["EnsembleBlue",]: asPersistentVector = self.__Xb.reshape((-1,min(__B_shape))) - self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix) + self.__Xb = Persistence.OneVector("Background") for member in asPersistentVector: - self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T ) + self.__Xb.store( numpy.asarray(member, dtype=float) ) __Xb_shape = min(__B_shape) else: - raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of a priori errors covariance matrix (B)"+\ + " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape)) # if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ): - raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape)) + raise ValueError( + "Shape characteristic of observation errors covariance matrix (R)"+\ + " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape)) # if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ): - raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape)) + raise ValueError( + "Shape characteristic of evolution model (EM)"+\ + " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape)) # if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ): - raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape)) + raise ValueError( + "Shape characteristic of control model (CM)"+\ + " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape)) # if ("Bounds" in self.__P) \ and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \ @@ -1260,6 +1600,12 @@ class AlgorithmAndParameters(object): raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." 
\ %(len(self.__P["Bounds"]),max(__Xb_shape))) # + if ("StateBoundsForQuantiles" in self.__P) \ + and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \ + and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)): + raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \ + %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape))) + # return 1 # ============================================================================== @@ -1292,7 +1638,7 @@ class RegulationAndParameters(object): self.__P.update( dict(__Dict) ) # if __Algo is not None: - self.__P.update( {"Algorithm":__Algo} ) + self.__P.update( {"Algorithm":str(__Algo)} ) def get(self, key = None): "Vérifie l'existence d'une clé de variable ou de paramètres" @@ -1341,19 +1687,11 @@ class DataObserver(object): else: raise ValueError("setting an observer has to be done over a variable name or a list of variable names.") # - if asString is not None: - __FunctionText = asString - elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates): - __FunctionText = Templates.ObserverTemplates[asTemplate] - elif asScript is not None: - __FunctionText = Interfaces.ImportFromScript(asScript).getstring() - else: - __FunctionText = "" - __Function = ObserverF(__FunctionText) - # if asObsObject is not None: self.__O = asObsObject else: + __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript)) + __Function = Observer2Func(__FunctionText) self.__O = __Function.getfunc() # for k in range(len(self.__V)): @@ -1372,6 +1710,89 @@ class DataObserver(object): "x.__str__() <==> str(x)" return str(self.__V)+"\n"+str(self.__O) +# ============================================================================== +class UserScript(object): + """ + Classe générale d'interface de type texte de script utilisateur + """ + def __init__(self, + name = "GenericUserScript", + asTemplate = None, + asString = None, + asScript = None, + ): + """ + """ + self.__name = str(name) + # + if asString is not None: + self.__F = asString + elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates): + self.__F = Templates.UserPostAnalysisTemplates[asTemplate] + elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates): + self.__F = Templates.ObserverTemplates[asTemplate] + elif asScript is not None: + self.__F = Interfaces.ImportFromScript(asScript).getstring() + else: + self.__F = "" + + def __repr__(self): + "x.__repr__() <==> repr(x)" + return repr(self.__F) + + def __str__(self): + "x.__str__() <==> str(x)" + return str(self.__F) + +# ============================================================================== +class ExternalParameters(object): + """ + Classe générale d'interface pour le stockage des paramètres externes + """ + def __init__(self, + name = "GenericExternalParameters", + asDict = None, + asScript = None, + ): + """ + """ + self.__name = str(name) + self.__P = {} + # + self.updateParameters( asDict, asScript ) + + def updateParameters(self, + asDict = None, + asScript = None, + ): + "Mise à jour des paramètres" + if asDict is None and asScript is not None: + __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" ) + else: + __Dict = asDict + # + if __Dict is not None: + self.__P.update( dict(__Dict) ) + + def get(self, key = 
None): + if key in self.__P: + return self.__P[key] + else: + return list(self.__P.keys()) + + def keys(self): + return list(self.__P.keys()) + + def pop(self, k, d): + return self.__P.pop(k, d) + + def items(self): + return self.__P.items() + + def __contains__(self, key=None): + "D.__contains__(k) -> True if D has a key k, else False" + return key in self.__P + # ============================================================================== class State(object): """ @@ -1447,16 +1868,21 @@ class State(object): # if __Vector is not None: self.__is_vector = True - self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T + if isinstance(__Vector, str): + __Vector = PlatformInfo.strvect2liststr( __Vector ) + self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1)) self.shape = self.__V.shape self.size = self.__V.size elif __Series is not None: self.__is_series = True if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)): - self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix) - if isinstance(__Series, str): __Series = eval(__Series) + self.__V = Persistence.OneVector(self.__name) + if isinstance(__Series, str): + __Series = PlatformInfo.strmatrix2liststr(__Series) for member in __Series: - self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T ) + if isinstance(member, str): + member = PlatformInfo.strvect2liststr( member ) + self.__V.store(numpy.asarray( member, dtype=float )) else: self.__V = __Series if isinstance(self.__V.shape, (tuple, list)): @@ -1467,7 +1893,10 @@ class State(object): self.shape = (self.shape[0],1) self.size = self.shape[0] * self.shape[1] else: - raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name) + raise ValueError( + "The %s object is improperly defined or undefined,"%self.__name+\ + " it requires at minima either a vector, a list/tuple of"+\ + " vectors or a persistent object. Please check your vector input.") # if scheduledBy is not None: self.__T = scheduledBy @@ -1550,15 +1979,23 @@ class Covariance(object): __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject # if __Scalar is not None: - if numpy.matrix(__Scalar).size != 1: - raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size) + if isinstance(__Scalar, str): + __Scalar = PlatformInfo.strvect2liststr( __Scalar ) + if len(__Scalar) > 0: __Scalar = __Scalar[0] + if numpy.array(__Scalar).size != 1: + raise ValueError( + " The diagonal multiplier given to define a sparse matrix is"+\ + " not a unique scalar value.\n Its actual measured size is"+\ + " %i. 
Please check your scalar input."%numpy.array(__Scalar).size) self.__is_scalar = True self.__C = numpy.abs( float(__Scalar) ) self.shape = (0,0) self.size = 0 elif __Vector is not None: + if isinstance(__Vector, str): + __Vector = PlatformInfo.strvect2liststr( __Vector ) self.__is_vector = True - self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) ) + self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) ) self.shape = (self.__C.size,self.__C.size) self.size = self.__C.size**2 elif __Matrix is not None: @@ -1569,7 +2006,7 @@ class Covariance(object): elif __Object is not None: self.__is_object = True self.__C = __Object - for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"): + for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"): if not hasattr(self.__C,at): raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at)) if hasattr(self.__C,"shape"): @@ -1582,7 +2019,6 @@ class Covariance(object): self.size = 0 else: pass - # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name) # self.__validate() @@ -1600,13 +2036,13 @@ class Covariance(object): raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,)) if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING): try: - L = numpy.linalg.cholesky( self.__C ) - except: + numpy.linalg.cholesky( self.__C ) + except Exception: raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,)) if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING): try: - L = self.__C.cholesky() - except: + self.__C.cholesky() + except Exception: raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,)) def isscalar(self): @@ -1628,12 +2064,12 @@ class Covariance(object): def getI(self): "Inversion" if self.ismatrix(): - return Covariance(self.__name+"I", asCovariance = self.__C.I ) + return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) ) elif self.isvector(): return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C ) elif self.isscalar(): return Covariance(self.__name+"I", asEyeByScalar = 1. 
/ self.__C ) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"getI"): return Covariance(self.__name+"I", asCovObject = self.__C.getI() ) else: return None # Indispensable @@ -1646,8 +2082,10 @@ class Covariance(object): return Covariance(self.__name+"T", asEyeByVector = self.__C ) elif self.isscalar(): return Covariance(self.__name+"T", asEyeByScalar = self.__C ) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"getT"): return Covariance(self.__name+"T", asCovObject = self.__C.getT() ) + else: + raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,)) def cholesky(self): "Décomposition de Cholesky" @@ -1659,17 +2097,49 @@ class Covariance(object): return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) ) elif self.isobject() and hasattr(self.__C,"cholesky"): return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() ) + else: + raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,)) def choleskyI(self): "Inversion de la décomposition de Cholesky" if self.ismatrix(): - return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I ) + return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) ) elif self.isvector(): return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) ) elif self.isscalar(): return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) ) elif self.isobject() and hasattr(self.__C,"choleskyI"): return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() ) + else: + raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,)) + + def sqrtm(self): + "Racine carrée matricielle" + if self.ismatrix(): + import scipy + return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) ) + elif self.isvector(): + return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) ) + elif self.isscalar(): + return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) ) + elif self.isobject() and hasattr(self.__C,"sqrtm"): + return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() ) + else: + raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,)) + + def sqrtmI(self): + "Inversion de la racine carrée matricielle" + if self.ismatrix(): + import scipy + return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) ) + elif self.isvector(): + return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) ) + elif self.isscalar(): + return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) ) + elif self.isobject() and hasattr(self.__C,"sqrtmI"): + return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() ) + else: + raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,)) def diag(self, msize=None): "Diagonale de la matrice" @@ -1682,22 +2152,10 @@ class Covariance(object): raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) else: return self.__C * numpy.ones(int(msize)) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"diag"): return self.__C.diag() - - def asfullmatrix(self, msize=None): - "Matrice pleine" - if self.ismatrix(): - return self.__C - elif self.isvector(): - return numpy.matrix( 
numpy.diag(self.__C), float ) - elif self.isscalar(): - if msize is None: - raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) - else: - return numpy.matrix( self.__C * numpy.eye(int(msize)), float ) - elif self.isobject() and hasattr(self.__C,"asfullmatrix"): - return self.__C.asfullmatrix() + else: + raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,)) def trace(self, msize=None): "Trace de la matrice" @@ -1712,6 +2170,28 @@ class Covariance(object): return self.__C * int(msize) elif self.isobject(): return self.__C.trace() + else: + raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,)) + + def asfullmatrix(self, msize=None): + "Matrice pleine" + if self.ismatrix(): + return numpy.asarray(self.__C, dtype=float) + elif self.isvector(): + return numpy.asarray( numpy.diag(self.__C), dtype=float ) + elif self.isscalar(): + if msize is None: + raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) + else: + return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float ) + elif self.isobject() and hasattr(self.__C,"asfullmatrix"): + return self.__C.asfullmatrix() + else: + raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,)) + + def assparsematrix(self): + "Valeur sparse" + return self.__C def getO(self): return self @@ -1730,7 +2210,10 @@ class Covariance(object): return self.__C + numpy.asmatrix(other) elif self.isvector() or self.isscalar(): _A = numpy.asarray(other) - _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C + if len(_A.shape) == 1: + _A.reshape((-1,1))[::2] += self.__C + else: + _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C return numpy.asmatrix(_A) def __radd__(self, other): @@ -1754,6 +2237,36 @@ class Covariance(object): "x.__neg__() <==> -x" return - self.__C + def __matmul__(self, other): + "x.__mul__(y) <==> x@y" + if self.ismatrix() and isinstance(other, (int, float)): + return numpy.asarray(self.__C) * other + elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.ravel(self.__C @ numpy.ravel(other)) + elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice + return numpy.asarray(self.__C) @ numpy.asarray(other) + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name)) + elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.ravel(self.__C) * numpy.ravel(other) + elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice + return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other) + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) + elif self.isscalar() and isinstance(other,numpy.matrix): + return numpy.asarray(self.__C * other) + elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)): + if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1: + return self.__C * numpy.ravel(other) + else: + return self.__C * numpy.asarray(other) + elif self.isobject(): + return self.__C.__matmul__(other) 
+ else: + raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other))) + def __mul__(self, other): "x.__mul__(y) <==> x*y" if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)): @@ -1764,14 +2277,16 @@ class Covariance(object): elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return self.__C * numpy.asmatrix(other) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name)) elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): if numpy.ravel(other).size == self.shape[1]: # Vecteur return numpy.asmatrix(self.__C * numpy.ravel(other)).T elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose()) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) elif self.isscalar() and isinstance(other,numpy.matrix): return self.__C * other elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)): @@ -1782,7 +2297,36 @@ class Covariance(object): elif self.isobject(): return self.__C.__mul__(other) else: - raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other))) + raise NotImplementedError( + "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other))) + + def __rmatmul__(self, other): + "x.__rmul__(y) <==> y@x" + if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)): + return other * self.__C + elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.asmatrix(numpy.ravel(other)) * self.__C + elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice + return numpy.asmatrix(other) * self.__C + else: + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) + elif self.isvector() and isinstance(other,numpy.matrix): + if numpy.ravel(other).size == self.shape[0]: # Vecteur + return numpy.asmatrix(numpy.ravel(other) * self.__C) + elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice + return numpy.asmatrix(numpy.array(other) * self.__C) + else: + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) + elif self.isscalar() and isinstance(other,numpy.matrix): + return other * self.__C + elif self.isobject(): + return self.__C.__rmatmul__(other) + else: + raise NotImplementedError( + "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other))) def __rmul__(self, other): "x.__rmul__(y) <==> y*x" @@ -1794,29 +2338,34 @@ class Covariance(object): elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice return numpy.asmatrix(other) * self.__C else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s 
matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) elif self.isvector() and isinstance(other,numpy.matrix): if numpy.ravel(other).size == self.shape[0]: # Vecteur return numpy.asmatrix(numpy.ravel(other) * self.__C) elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice return numpy.asmatrix(numpy.array(other) * self.__C) else: - raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) + raise ValueError( + "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) elif self.isscalar() and isinstance(other,numpy.matrix): return other * self.__C + elif self.isscalar() and isinstance(other,float): + return other * self.__C elif self.isobject(): return self.__C.__rmul__(other) else: - raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other))) + raise NotImplementedError( + "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other))) def __len__(self): "x.__len__() <==> len(x)" return self.shape[0] # ============================================================================== -class ObserverF(object): +class Observer2Func(object): """ - Creation d'une fonction d'observateur a partir de son texte + Création d'une fonction d'observateur a partir de son texte """ def __init__(self, corps=""): self.__corps = corps @@ -1830,7 +2379,7 @@ class ObserverF(object): # ============================================================================== class CaseLogger(object): """ - Conservation des commandes de creation d'un cas + Conservation des commandes de création d'un cas """ def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None): self.__name = str(__name) @@ -1841,6 +2390,9 @@ class CaseLogger(object): "TUI" :Interfaces._TUIViewer, "SCD" :Interfaces._SCDViewer, "YACS":Interfaces._YACSViewer, + "SimpleReportInRst":Interfaces._SimpleReportInRstViewer, + "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer, + "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer, } self.__loaders = { "TUI" :Interfaces._TUIViewer, @@ -1909,16 +2461,8 @@ def MultiFonction( # # Calculs effectifs if __mpEnabled: - _jobs = [] - if _extraArguments is None: - _jobs = __xserie - elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)): - for __xvalue in __xserie: - _jobs.append( [__xvalue, ] + list(_extraArguments) ) - else: - raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),)) + _jobs = __xserie # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),)) - import multiprocessing with multiprocessing.Pool(__mpWorkers) as pool: __multiHX = pool.map( _sFunction, _jobs ) pool.close() @@ -1943,121 +2487,6 @@ def MultiFonction( # logging.debug("MULTF Internal multifonction calculations end") return __multiHX -# ============================================================================== -def CostFunction3D(_x, - _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo - _HmX = None, # Simulation déjà faite de Hm(x) - _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple - _BI = None, - _RI = None, - _Xb = None, - _Y = None, - _SIV = 
False, # A résorber pour la 8.0 - _SSC = [], # self._parameters["StoreSupplementaryCalculations"] - _nPS = 0, # nbPreviousSteps - _QM = "DA", # QualityMeasure - _SSV = {}, # Entrée et/ou sortie : self.StoredVariables - _fRt = False, # Restitue ou pas la sortie étendue - _sSc = True, # Stocke ou pas les SSC - ): - """ - Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE - et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu, - DFO, QuantileRegression - """ - if not _sSc: - _SIV = False - _SSC = {} - else: - for k in ["CostFunctionJ", - "CostFunctionJb", - "CostFunctionJo", - "CurrentOptimum", - "CurrentState", - "IndexOfOptimum", - "SimulatedObservationAtCurrentOptimum", - "SimulatedObservationAtCurrentState", - ]: - if k not in _SSV: - _SSV[k] = [] - if hasattr(_SSV[k],"store"): - _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store" - # - _X = numpy.asmatrix(numpy.ravel( _x )).T - if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC: - _SSV["CurrentState"].append( _X ) - # - if _HmX is not None: - _HX = _HmX - else: - if _Hm is None: - raise ValueError("COSTFUNCTION3D Operator has to be defined.") - if _arg is None: - _HX = _Hm( _X ) - else: - _HX = _Hm( _X, *_arg ) - _HX = numpy.asmatrix(numpy.ravel( _HX )).T - # - if "SimulatedObservationAtCurrentState" in _SSC or \ - "SimulatedObservationAtCurrentOptimum" in _SSC: - _SSV["SimulatedObservationAtCurrentState"].append( _HX ) - # - if numpy.any(numpy.isnan(_HX)): - Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan - else: - _Y = numpy.asmatrix(numpy.ravel( _Y )).T - if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]: - if _BI is None or _RI is None: - raise ValueError("Background and Observation error covariance matrix has to be properly defined!") - _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T - Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb) - Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX) - elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]: - if _RI is None: - raise ValueError("Observation error covariance matrix has to be properly defined!") - Jb = 0. - Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX) - elif _QM in ["LeastSquares", "LS", "L2"]: - Jb = 0. - Jo = 0.5 * (_Y - _HX).T * (_Y - _HX) - elif _QM in ["AbsoluteValue", "L1"]: - Jb = 0. - Jo = numpy.sum( numpy.abs(_Y - _HX) ) - elif _QM in ["MaximumError", "ME"]: - Jb = 0. - Jo = numpy.max( numpy.abs(_Y - _HX) ) - elif _QM in ["QR", "Null"]: - Jb = 0. - Jo = 0. - else: - raise ValueError("Unknown asked quality measure!") - # - J = float( Jb ) + float( Jo ) - # - if _sSc: - _SSV["CostFunctionJb"].append( Jb ) - _SSV["CostFunctionJo"].append( Jo ) - _SSV["CostFunctionJ" ].append( J ) - # - if "IndexOfOptimum" in _SSC or \ - "CurrentOptimum" in _SSC or \ - "SimulatedObservationAtCurrentOptimum" in _SSC: - IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS - if "IndexOfOptimum" in _SSC: - _SSV["IndexOfOptimum"].append( IndexMin ) - if "CurrentOptimum" in _SSC: - _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] ) - if "SimulatedObservationAtCurrentOptimum" in _SSC: - _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] ) - # - if _fRt: - return _SSV - else: - if _QM in ["QR"]: # Pour le QuantileRegression - return _HX - else: - return J - # ============================================================================== if __name__ == "__main__": print('\n AUTODIAGNOSTIC\n')
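
The hunks above introduce three user-facing additions: the UserScript class, which unifies how an Observer (or UserPostAnalysis) body can be supplied as an inline string, a named template or an external script; the ExternalParameters dict-like store; and native "@" products plus a matrix square root on Covariance. The sketches below are editorial illustrations of these interfaces, not part of the patch itself; they assume the patched daCore package is importable (i.e. an ADAO/SALOME environment) and that "ValuePrinter" is one of the shipped observer templates.

    from daCore.BasicObjects import UserScript
    from daCore import Templates

    # An inline body (asString) takes precedence over a template name
    src = UserScript("Observer", asTemplate="ValuePrinter", asString="print(var[-1])")
    assert str(src) == "print(var[-1])"

    # Without asString, a known template name resolves to its stored body
    tpl = UserScript("Observer", asTemplate="ValuePrinter")
    assert str(tpl) == Templates.ObserverTemplates["ValuePrinter"]

DataObserver now builds its callable through exactly this path, via Observer2Func(str(UserScript(...))).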
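
Covariance now supports the "@" operator through __matmul__/__rmatmul__ and a matrix square root through sqrtm()/sqrtmI() (based on scipy.linalg.sqrtm). A minimal sketch of the full-matrix branch, with an illustrative 2x2 background covariance B and state x, under the same import assumption:

    import numpy
    from daCore.BasicObjects import Covariance   # requires the ADAO daCore package

    B = Covariance("B", asCovariance = numpy.array([[4., 1.],
                                                    [1., 3.]]))

    S = B.sqrtm()                                # matrix square root of B
    assert numpy.allclose(S.asfullmatrix() @ S.asfullmatrix(), B.asfullmatrix())

    x = numpy.array([1., 2.])
    y = B @ x                                    # plain matrix-vector product, returned raveled
    assert numpy.allclose(y, B.asfullmatrix() @ x)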
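
The same operators also cover the sparse storages: a diagonal given as a vector (asEyeByVector) or a scalar multiple of the identity (asEyeByScalar). There "@" reduces to an element-wise product and sqrtm() to an element-wise square root, as this last hedged sketch shows (R, Q and x are illustrative names):

    import numpy
    from daCore.BasicObjects import Covariance   # requires the ADAO daCore package

    R = Covariance("R", asEyeByVector = [2., 8.])    # diagonal stored as a vector
    Q = Covariance("Q", asEyeByScalar = 0.5)         # 0.5 times the identity

    x = numpy.array([1., 2.])
    assert numpy.allclose(R @ x, [2., 16.])          # element-wise product on the diagonal branch
    assert numpy.allclose(Q @ x, [0.5, 1.0])         # scalar branch

    # sqrtm() on the diagonal branch is the element-wise square root
    assert numpy.allclose(R.sqrtm().asfullmatrix(), numpy.diag(numpy.sqrt([2., 8.])))

Note also that the unsupported branches of getT, cholesky, choleskyI, sqrtm, sqrtmI, diag, trace and asfullmatrix now raise an explicit AttributeError instead of silently returning None.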