X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FBasicObjects.py;h=7b8953ac6cb8a0c3d3d811d2e512f379403717f0;hb=fce485f3b022e74f42a857c893021b90bc3cf838;hp=dee9c53373b8d1a61c8af6bb76dafddce726b003;hpb=b94b7f8d1992c3d8a00c921efe574b9d1f547e07;p=modules%2Fadao.git

diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py
index dee9c53..7b8953a 100644
--- a/src/daComposant/daCore/BasicObjects.py
+++ b/src/daComposant/daCore/BasicObjects.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright (C) 2008-2020 EDF R&D
+# Copyright (C) 2008-2021 EDF R&D
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
@@ -30,6 +31,7 @@ import os
 import sys
 import logging
 import copy
+import time
 import numpy
 from functools import partial
 from daCore import Persistence, PlatformInfo, Interfaces
@@ -56,44 +57,46 @@ class CacheManager(object):

     def clearCache(self):
         "Vide le cache"
-        self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator
+        self.__listOPCV = []
         self.__seenNames = []
-        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

-    def wasCalculatedIn(self, xValue, oName="" ): #, info="" ):
+    def wasCalculatedIn(self, xValue, oName="" ):
         "Vérifie l'existence d'un calcul correspondant à la valeur"
         __alc = False
         __HxV = None
         if self.__enabled:
             for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
-                if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size):
-                    # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
+                if not hasattr(xValue, 'size'):
                     pass
-                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
+                elif (str(oName) != self.__listOPCV[i][3]):
+                    pass
+                elif (xValue.size != self.__listOPCV[i][0].size):
+                    pass
+                elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
+                    pass
+                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                     __alc = True
                     __HxV = self.__listOPCV[i][1]
-                    # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
                     break
         return __alc, __HxV

     def storeValueInX(self, xValue, HxValue, oName="" ):
         "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
         if self.__lenghtOR < 0:
-            self.__lenghtOR = 2 * xValue.size + 2
+            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
             self.__initlnOR = self.__lenghtOR
             self.__seenNames.append(str(oName))
         if str(oName) not in self.__seenNames: # Etend la liste si nouveau
-            self.__lenghtOR += 2 * xValue.size + 2
+            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
             self.__initlnOR += self.__lenghtOR
             self.__seenNames.append(str(oName))
         while len(self.__listOPCV) > self.__lenghtOR:
-            # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
             self.__listOPCV.pop(0)
         self.__listOPCV.append( (
-            copy.copy(numpy.ravel(xValue)),
-            copy.copy(HxValue),
-            numpy.linalg.norm(xValue),
-            str(oName),
+            copy.copy(numpy.ravel(xValue)), # 0 Previous point
+            copy.copy(HxValue),             # 1 Previous value
+            numpy.linalg.norm(xValue),      # 2 Norm
+            str(oName),                     # 3 Operator name
             ) )

     def disable(self):
@@ -178,7 +181,7 @@ class Operator(object):
         "Renvoie le type"
         return self.__Type

-    def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
+    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
         """
         Permet de restituer le résultat de l'application de l'opérateur à une
         série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
@@ -202,13 +205,13 @@ class Operator(object):
         #
         if _HValue is not None:
             assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
-            HxValue = []
+            _HxValue = []
             for i in range(len(_HValue)):
-                HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
+                _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
                 if self.__AvoidRC:
-                    Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name)
+                    Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
         else:
-            HxValue = []
+            _HxValue = []
             _xserie = []
             _hindex = []
             for i, xv in enumerate(_xValue):
@@ -223,13 +226,14 @@
                 else:
                     if self.__Matrix is not None:
                         self.__addOneMatrixCall()
-                        _hv = self.__Matrix * xv
+                        _xv = numpy.matrix(numpy.ravel(xv)).T
+                        _hv = self.__Matrix * _xv
                     else:
                         self.__addOneMethodCall()
                         _xserie.append( xv )
                         _hindex.append( i )
                         _hv = None
-                HxValue.append( _hv )
+                _HxValue.append( _hv )
         #
         if len(_xserie)>0 and self.__Matrix is None:
             if self.__extraArgs is None:
@@ -241,14 +245,17 @@
             for i in _hindex:
                 _xv = _xserie.pop(0)
                 _hv = _hserie.pop(0)
-                HxValue[i] = _hv
+                _HxValue[i] = _hv
                 if self.__AvoidRC:
                     Operator.CM.storeValueInX(_xv,_hv,self.__name)
         #
-        if argsAsSerie: return HxValue
-        else:           return HxValue[-1]
+        if returnSerieAsArrayMatrix:
+            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
+        #
+        if argsAsSerie: return _HxValue
+        else:           return _HxValue[-1]

-    def appliedControledFormTo(self, paires, argsAsSerie = False):
+    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
         """
         Permet de restituer le résultat de l'application de l'opérateur à des
         paires (xValue, uValue). Cette méthode se contente d'appliquer, son
@@ -265,30 +272,33 @@ class Operator(object):
         PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
         #
         if self.__Matrix is not None:
-            HxValue = []
+            _HxValue = []
             for paire in _xuValue:
                 _xValue, _uValue = paire
+                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                 self.__addOneMatrixCall()
-                HxValue.append( self.__Matrix * _xValue )
+                _HxValue.append( self.__Matrix * _xValue )
         else:
-            HxValue = []
+            _xuArgs = []
             for paire in _xuValue:
-                _xuValue = []
                 _xValue, _uValue = paire
                 if _uValue is not None:
-                    _xuValue.append( paire )
+                    _xuArgs.append( paire )
                 else:
-                    _xuValue.append( _xValue )
-            self.__addOneMethodCall( len(_xuValue) )
+                    _xuArgs.append( _xValue )
+            self.__addOneMethodCall( len(_xuArgs) )
             if self.__extraArgs is None:
-                HxValue = self.__Method( _xuValue ) # Calcul MF
+                _HxValue = self.__Method( _xuArgs ) # Calcul MF
             else:
-                HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF
+                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
+        #
+        if returnSerieAsArrayMatrix:
+            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
         #
-        if argsAsSerie: return HxValue
-        else:           return HxValue[-1]
+        if argsAsSerie: return _HxValue
+        else:           return _HxValue[-1]

-    def appliedInXTo(self, paires, argsAsSerie = False):
+    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
         """
         Permet de restituer le résultat de l'application de l'opérateur à une
         série d'arguments xValue, sachant que l'opérateur est valable en
@@ -309,20 +319,24 @@ class Operator(object):
         PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
         #
         if self.__Matrix is not None:
-            HxValue = []
+            _HxValue = []
             for paire in _nxValue:
                 _xNominal, _xValue = paire
+                _xValue = numpy.matrix(numpy.ravel(_xValue)).T
                 self.__addOneMatrixCall()
-                HxValue.append( self.__Matrix * _xValue )
+                _HxValue.append( self.__Matrix * _xValue )
         else:
             self.__addOneMethodCall( len(_nxValue) )
             if self.__extraArgs is None:
-                HxValue = self.__Method( _nxValue ) # Calcul MF
+                _HxValue = self.__Method( _nxValue ) # Calcul MF
             else:
-                HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
+                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
+        #
+        if returnSerieAsArrayMatrix:
+            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
         #
-        if argsAsSerie: return HxValue
-        else:           return HxValue[-1]
+        if argsAsSerie: return _HxValue
+        else:           return _HxValue[-1]

     def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
         """
@@ -510,6 +524,7 @@ class FullOperator(object):
                 centeredDF              = __Function["CenteredFiniteDifference"],
                 increment               = __Function["DifferentialIncrement"],
                 dX                      = __Function["withdX"],
+                extraArguments          = self.__extraArgs,
                 avoidingRedundancy      = __Function["withAvoidingRedundancy"],
                 toleranceInRedundancy   = __Function["withToleranceInRedundancy"],
                 lenghtOfRedundancy      = __Function["withLenghtOfRedundancy"],
@@ -591,6 +606,7 @@ class Algorithm(object):
             - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
             - CostFunctionJo : partie observations de la fonction-coût : Jo
             - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
+            - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
             - CurrentOptimum : état optimal courant lors d'itérations
             - CurrentState : état courant lors d'itérations
             - GradientOfCostFunctionJ : gradient de la fonction-coût globale
@@ -608,6 +624,7 @@ class Algorithm(object):
             - OMB : Observation moins Background : Y - Xb
             - ForecastState : état prédit courant lors d'itérations
             - Residu : dans le cas des algorithmes de vérification
+            - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
             - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
             - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
            - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
@@ -645,6 +662,8 @@ class Algorithm(object):
         self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
         self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
         self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
+        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
+        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
         self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
         self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
         self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
@@ -663,6 +682,7 @@ class Algorithm(object):
         self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
         self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
         self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
+        self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
         self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
         self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
         self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
@@ -680,10 +700,11 @@ class Algorithm(object):
         self.__canonical_parameter_name["algorithm"] = "Algorithm"
         self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"

-    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
+    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
         "Pré-calcul"
         logging.debug("%s Lancement", self._name)
         logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
+        self._getTimeState(reset=True)
         #
         # Mise a jour des paramètres internes avec le contenu de Parameters, en
         # reprenant les valeurs par défauts pour toutes celles non définies
@@ -691,41 +712,82 @@ class Algorithm(object):
         for k, v in self.__variable_names_not_public.items():
             if k not in self._parameters:
                 self.__setParameters( {k:v} )
         #
-        # Corrections et compléments
-        def __test_vvalue(argument, variable, argname):
+        # Corrections et compléments des vecteurs
+        def __test_vvalue(argument, variable, argname, symbol=None):
+            if symbol is None: symbol = variable
             if argument is None:
                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
-                    raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
+                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
-                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
+                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                 else:
-                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
+                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
             else:
-                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
+                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
             return 0
         __test_vvalue( Xb, "Xb", "Background or initial state" )
         __test_vvalue( Y,  "Y",  "Observation" )
+        __test_vvalue( U,  "U",  "Control" )
         #
-        def __test_cvalue(argument, variable, argname):
+        # Corrections et compléments des covariances
+        def __test_cvalue(argument, variable, argname, symbol=None):
+            if symbol is None: symbol = variable
             if argument is None:
                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
-                    raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
+                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
-                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
+                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                 else:
-                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
+                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
             else:
-                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
+                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,symbol))
             return 0
-        __test_cvalue( R, "R", "Observation" )
         __test_cvalue( B, "B", "Background" )
+        __test_cvalue( R, "R", "Observation" )
         __test_cvalue( Q, "Q", "Evolution" )
         #
+        # Corrections et compléments des opérateurs
+        def __test_ovalue(argument, variable, argname, symbol=None):
+            if symbol is None: symbol = variable
+            if argument is None or (isinstance(argument,dict) and len(argument)==0):
+                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
+                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
+                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
+                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
+                else:
+                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
+            else:
+                logging.debug("%s %s operator %s is set."%(self._name,argname,symbol))
+            return 0
+        __test_ovalue( HO, "HO", "Observation", "H" )
+        __test_ovalue( EM, "EM", "Evolution", "M" )
+        __test_ovalue( CM, "CM", "Control Model", "C" )
+        #
+        # Corrections et compléments des bornes
         if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
             logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
         else:
             self._parameters["Bounds"] = None
         #
+        # Corrections et compléments de l'initialisation en X
+        if "InitializationPoint" in self._parameters:
+            if Xb is not None:
+                if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
+                    if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
+                        raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
+                            %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
+                    # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
+                else:
+                    self._parameters["InitializationPoint"] = numpy.ravel(Xb)
+            else:
+                if self._parameters["InitializationPoint"] is None:
+                    raise ValueError("Forced initial point can not be set without any given Background or required value")
+        #
+        # Correction pour pallier a un bug de TNC sur le retour du Minimum
+        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
+            self.setParameterValue("StoreInternalVariables",True)
+        #
+        # Verbosité et logging
         if logging.getLogger().level < logging.WARNING:
             self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
             if PlatformInfo.has_scipy:
@@ -760,6 +822,7 @@ class Algorithm(object):
             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
+        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
         logging.debug("%s Terminé", self._name)
         return 0

@@ -816,7 +879,7 @@ class Algorithm(object):
         """
         raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")

-    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
+    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
         """
         Permet de définir dans l'algorithme des paramètres requis et leurs
         caractéristiques par défaut.
@@ -830,6 +893,7 @@ class Algorithm(object):
             "minval"  : minval,
             "maxval"  : maxval,
             "listval" : listval,
+            "listadv" : listadv,
             "message" : message,
             }
         self.__canonical_parameter_name[name.lower()] = name
@@ -855,6 +919,7 @@ class Algorithm(object):
         minval  = self.__required_parameters[__k]["minval"]
         maxval  = self.__required_parameters[__k]["maxval"]
         listval = self.__required_parameters[__k]["listval"]
+        listadv = self.__required_parameters[__k]["listadv"]
         #
         if value is None and default is None:
             __val = None
@@ -873,12 +938,14 @@ class Algorithm(object):
                 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
             if maxval is not None and (numpy.array(__val, float) > maxval).any():
                 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
-            if listval is not None:
+            if listval is not None or listadv is not None:
                 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                     for v in __val:
-                        if v not in listval:
+                        if listval is not None and v in listval: continue
+                        elif listadv is not None and v in listadv: continue
+                        else:
                             raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
-                elif __val not in listval:
+                elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                     raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
         #
         return __val
@@ -924,6 +991,33 @@ class Algorithm(object):
                 pass
             logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])

+    def _getTimeState(self, reset=False):
+        """
+        Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
+        """
+        if reset:
+            self.__initial_cpu_time      = time.process_time()
+            self.__initial_elapsed_time  = time.perf_counter()
+            return 0., 0.
+        else:
+            self.__cpu_time     = time.process_time() - self.__initial_cpu_time
+            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
+            return self.__cpu_time, self.__elapsed_time
+
+    def _StopOnTimeLimit(self, X=None, withReason=False):
+        "Stop criteria on time limit: True/False [+ Reason]"
+        c, e = self._getTimeState()
+        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
+            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
+        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
+            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
+        else:
+            __SC, __SR = False, ""
+        if withReason:
+            return __SC, __SR
+        else:
+            return __SC
+
 # ==============================================================================
 class AlgorithmAndParameters(object):
     """
@@ -1333,7 +1427,7 @@ class RegulationAndParameters(object):
             self.__P.update( dict(__Dict) )
         #
         if __Algo is not None:
-            self.__P.update( {"Algorithm":__Algo} )
+            self.__P.update( {"Algorithm":str(__Algo)} )

     def get(self, key = None):
         "Vérifie l'existence d'une clé de variable ou de paramètres"
@@ -1382,19 +1476,11 @@ class DataObserver(object):
         else:
             raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
         #
-        if asString is not None:
-            __FunctionText = asString
-        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
-            __FunctionText = Templates.ObserverTemplates[asTemplate]
-        elif asScript is not None:
-            __FunctionText = Interfaces.ImportFromScript(asScript).getstring()
-        else:
-            __FunctionText = ""
-        __Function = ObserverF(__FunctionText)
-        #
         if asObsObject is not None:
             self.__O = asObsObject
         else:
+            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
+            __Function = Observer2Func(__FunctionText)
             self.__O = __Function.getfunc()
         #
         for k in range(len(self.__V)):
@@ -1413,6 +1499,89 @@ class DataObserver(object):
         "x.__str__() <==> str(x)"
         return str(self.__V)+"\n"+str(self.__O)

+# ==============================================================================
+class UserScript(object):
+    """
+    Classe générale d'interface de type texte de script utilisateur
+    """
+    def __init__(self,
+                 name       = "GenericUserScript",
+                 asTemplate = None,
+                 asString   = None,
+                 asScript   = None,
+                ):
+        """
+        """
+        self.__name = str(name)
+        #
+        if asString is not None:
+            self.__F = asString
+        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
+            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
+        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
+            self.__F = Templates.ObserverTemplates[asTemplate]
+        elif asScript is not None:
+            self.__F = Interfaces.ImportFromScript(asScript).getstring()
+        else:
+            self.__F = ""
+
+    def __repr__(self):
+        "x.__repr__() <==> repr(x)"
+        return repr(self.__F)
+
+    def __str__(self):
+        "x.__str__() <==> str(x)"
+        return str(self.__F)
+
+# ==============================================================================
+class ExternalParameters(object):
+    """
+    Classe générale d'interface de type texte de script utilisateur
+    """
+    def __init__(self,
+                 name     = "GenericExternalParameters",
+                 asDict   = None,
+                 asScript = None,
+                ):
+        """
+        """
+        self.__name = str(name)
+        self.__P    = {}
+        #
+        self.updateParameters( asDict, asScript )
+
+    def updateParameters(self,
+                 asDict   = None,
+                 asScript = None,
+                ):
+        "Mise a jour des parametres"
+        if asDict is None and asScript is not None:
+            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
+        else:
+            __Dict = asDict
+        #
+        if __Dict is not None:
+            self.__P.update( dict(__Dict) )
+
+    def get(self, key = None):
+        if key in self.__P:
+            return self.__P[key]
+        else:
+            return list(self.__P.keys())
+
+    def keys(self):
+        return list(self.__P.keys())
+
+    def pop(self, k, d):
+        return self.__P.pop(k, d)
+
+    def items(self):
+        return self.__P.items()
+
+    def __contains__(self, key=None):
+        "D.__contains__(k) -> True if D has a key k, else False"
+        return key in self.__P
+
 # ==============================================================================
 class State(object):
     """
@@ -1591,15 +1760,20 @@ class Covariance(object):
             __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
         #
         if __Scalar is not None:
-            if numpy.matrix(__Scalar).size != 1:
-                raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
+            if isinstance(__Scalar, str):
+                __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
+                if len(__Scalar) > 0: __Scalar = __Scalar[0]
+            if numpy.array(__Scalar).size != 1:
+                raise ValueError('  The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n  Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
             self.__is_scalar = True
            self.__C         = numpy.abs( float(__Scalar) )
            self.shape       = (0,0)
            self.size        = 0
         elif __Vector is not None:
+            if isinstance(__Vector, str):
+                __Vector = __Vector.replace(";"," ").replace(","," ").split()
             self.__is_vector = True
-            self.__C         = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
+            self.__C         = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
             self.shape       = (self.__C.size,self.__C.size)
             self.size        = self.__C.size**2
         elif __Matrix is not None:
@@ -1610,7 +1784,7 @@ class Covariance(object):
         elif __Object is not None:
             self.__is_object = True
             self.__C = __Object
-            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
+            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                 if not hasattr(self.__C,at):
                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
             if hasattr(self.__C,"shape"):
@@ -1669,12 +1843,12 @@ class Covariance(object):
     def getI(self):
         "Inversion"
         if self.ismatrix():
-            return Covariance(self.__name+"I", asCovariance  = self.__C.I )
+            return Covariance(self.__name+"I", asCovariance  = numpy.linalg.inv(self.__C) )
         elif self.isvector():
             return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
         elif self.isscalar():
             return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
-        elif self.isobject():
+        elif self.isobject() and hasattr(self.__C,"getI"):
             return Covariance(self.__name+"I", asCovObject   = self.__C.getI() )
         else:
             return None # Indispensable
@@ -1687,8 +1861,10 @@ class Covariance(object):
             return Covariance(self.__name+"T", asEyeByVector = self.__C )
         elif self.isscalar():
             return Covariance(self.__name+"T", asEyeByScalar = self.__C )
-        elif self.isobject():
+        elif self.isobject() and hasattr(self.__C,"getT"):
             return Covariance(self.__name+"T", asCovObject   = self.__C.getT() )
+        else:
+            raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))

     def cholesky(self):
         "Décomposition de Cholesky"
@@ -1700,17 +1876,49 @@ class Covariance(object):
             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
         elif self.isobject() and hasattr(self.__C,"cholesky"):
             return Covariance(self.__name+"C", asCovObject   = self.__C.cholesky() )
+        else:
+            raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))

     def choleskyI(self):
         "Inversion de la décomposition de Cholesky"
         if self.ismatrix():
-            return Covariance(self.__name+"H", asCovariance  = numpy.linalg.cholesky(self.__C).I )
+            return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
         elif self.isvector():
             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
         elif self.isscalar():
             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
         elif self.isobject() and hasattr(self.__C,"choleskyI"):
             return Covariance(self.__name+"H", asCovObject   = self.__C.choleskyI() )
+        else:
+            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
+
+    def sqrtm(self):
+        "Racine carrée matricielle"
+        if self.ismatrix():
+            import scipy
+            return Covariance(self.__name+"C", asCovariance  = numpy.real(scipy.linalg.sqrtm(self.__C)) )
+        elif self.isvector():
+            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
+        elif self.isscalar():
+            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
+        elif self.isobject() and hasattr(self.__C,"sqrtm"):
+            return Covariance(self.__name+"C", asCovObject   = self.__C.sqrtm() )
+        else:
+            raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
+
+    def sqrtmI(self):
+        "Inversion de la racine carrée matricielle"
+        if self.ismatrix():
+            import scipy
+            return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
+        elif self.isvector():
+            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
+        elif self.isscalar():
+            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
+        elif self.isobject() and hasattr(self.__C,"sqrtmI"):
+            return Covariance(self.__name+"H", asCovObject   = self.__C.sqrtmI() )
+        else:
+            raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))

     def diag(self, msize=None):
         "Diagonale de la matrice"
@@ -1723,22 +1931,10 @@ class Covariance(object):
                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
             else:
                 return self.__C * numpy.ones(int(msize))
-        elif self.isobject():
+        elif self.isobject() and hasattr(self.__C,"diag"):
             return self.__C.diag()
-
-    def asfullmatrix(self, msize=None):
-        "Matrice pleine"
-        if self.ismatrix():
-            return self.__C
-        elif self.isvector():
-            return numpy.matrix( numpy.diag(self.__C), float )
-        elif self.isscalar():
-            if msize is None:
-                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
-            else:
-                return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
-        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
-            return self.__C.asfullmatrix()
+        else:
+            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))

     def trace(self, msize=None):
         "Trace de la matrice"
@@ -1753,6 +1949,28 @@ class Covariance(object):
                 return self.__C * int(msize)
         elif self.isobject():
             return self.__C.trace()
+        else:
+            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
+
+    def asfullmatrix(self, msize=None):
+        "Matrice pleine"
+        if self.ismatrix():
+            return numpy.asarray(self.__C)
+        elif self.isvector():
+            return numpy.asarray( numpy.diag(self.__C), float )
+        elif self.isscalar():
+            if msize is None:
+                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
+            else:
+                return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
+        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
+            return self.__C.asfullmatrix()
+        else:
+            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
+
+    def assparsematrix(self):
+        "Valeur sparse"
+        return self.__C

     def getO(self):
         return self
@@ -1795,6 +2013,36 @@ class Covariance(object):
         "x.__neg__() <==> -x"
         return - self.__C

+    def __matmul__(self, other):
+        "x.__mul__(y) <==> x@y"
+        if self.ismatrix() and isinstance(other, (int, float)):
+            return numpy.asarray(self.__C) * other
+        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.ravel(self.__C @ numpy.ravel(other))
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.asarray(self.__C) @ numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
+        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.ravel(self.__C) * numpy.ravel(other)
+            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return numpy.asarray(self.__C * other)
+        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
+                return self.__C * numpy.ravel(other)
+            else:
+                return self.__C * numpy.asarray(other)
+        elif self.isobject():
+            return self.__C.__matmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __mul__(self, other):
         "x.__mul__(y) <==> x*y"
         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
@@ -1825,6 +2073,31 @@ class Covariance(object):
         else:
             raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

+    def __rmatmul__(self, other):
+        "x.__rmul__(y) <==> y@x"
+        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
+            return other * self.__C
+        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
+            if numpy.ravel(other).size == self.shape[1]: # Vecteur
+                return numpy.asmatrix(numpy.ravel(other)) * self.__C
+            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
+                return numpy.asmatrix(other) * self.__C
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
+        elif self.isvector() and isinstance(other,numpy.matrix):
+            if numpy.ravel(other).size == self.shape[0]: # Vecteur
+                return numpy.asmatrix(numpy.ravel(other) * self.__C)
+            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
+                return numpy.asmatrix(numpy.array(other) * self.__C)
+            else:
+                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
+        elif self.isscalar() and isinstance(other,numpy.matrix):
+            return other * self.__C
+        elif self.isobject():
+            return self.__C.__rmatmul__(other)
+        else:
+            raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
+
     def __rmul__(self, other):
         "x.__rmul__(y) <==> y*x"
         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
             return other * self.__C
@@ -1855,7 +2128,7 @@ class Covariance(object):
         return self.shape[0]

 # ==============================================================================
-class ObserverF(object):
+class Observer2Func(object):
     """
     Creation d'une fonction d'observateur a partir de son texte
     """
@@ -1950,14 +2223,7 @@ def MultiFonction(
     #
     # Calculs effectifs
     if __mpEnabled:
-        _jobs = []
-        if _extraArguments is None:
-            _jobs = __xserie
-        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
-            for __xvalue in __xserie:
-                _jobs.append( [__xvalue, ] + list(_extraArguments) )
-        else:
-            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
+        _jobs = __xserie
         #
         logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
         import multiprocessing
         with multiprocessing.Pool(__mpWorkers) as pool: