X-Git-Url: http://git.salome-platform.org/gitweb/?a=blobdiff_plain;f=src%2FdaComposant%2FdaCore%2FBasicObjects.py;h=7b8953ac6cb8a0c3d3d811d2e512f379403717f0;hb=fce485f3b022e74f42a857c893021b90bc3cf838;hp=429c2bc957fa3635cca588d06d476265bcb521bd;hpb=d4f0a8f335b824294ea92f5064c43bf9b7a010dc;p=modules%2Fadao.git diff --git a/src/daComposant/daCore/BasicObjects.py b/src/daComposant/daCore/BasicObjects.py index 429c2bc..7b8953a 100644 --- a/src/daComposant/daCore/BasicObjects.py +++ b/src/daComposant/daCore/BasicObjects.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2008-2020 EDF R&D +# Copyright (C) 2008-2021 EDF R&D # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -57,44 +57,46 @@ class CacheManager(object): def clearCache(self): "Vide le cache" - self.__listOPCV = [] # Previous Calculated Points, Results, Point Norms, Operator + self.__listOPCV = [] self.__seenNames = [] - # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP) - def wasCalculatedIn(self, xValue, oName="" ): #, info="" ): + def wasCalculatedIn(self, xValue, oName="" ): "Vérifie l'existence d'un calcul correspondant à la valeur" __alc = False __HxV = None if self.__enabled: for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1): - if not hasattr(xValue, 'size') or (str(oName) != self.__listOPCV[i][3]) or (xValue.size != self.__listOPCV[i][0].size): - # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape) + if not hasattr(xValue, 'size'): pass - elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]: + elif (str(oName) != self.__listOPCV[i][3]): + pass + elif (xValue.size != self.__listOPCV[i][0].size): + pass + elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size): + pass + elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]): __alc = True __HxV = self.__listOPCV[i][1] - # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i) break return __alc, __HxV def storeValueInX(self, xValue, HxValue, oName="" ): "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x" if self.__lenghtOR < 0: - self.__lenghtOR = 2 * xValue.size + 2 + self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 self.__initlnOR = self.__lenghtOR self.__seenNames.append(str(oName)) if str(oName) not in self.__seenNames: # Etend la liste si nouveau - self.__lenghtOR += 2 * xValue.size + 2 + self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2 self.__initlnOR += self.__lenghtOR self.__seenNames.append(str(oName)) while len(self.__listOPCV) > self.__lenghtOR: - # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR) self.__listOPCV.pop(0) self.__listOPCV.append( ( - copy.copy(numpy.ravel(xValue)), - copy.copy(HxValue), - numpy.linalg.norm(xValue), - str(oName), + copy.copy(numpy.ravel(xValue)), # 0 Previous point + copy.copy(HxValue), # 1 Previous value + numpy.linalg.norm(xValue), # 2 Norm + str(oName), # 3 Operator name ) ) def disable(self): @@ -179,7 +181,7 @@ class Operator(object): "Renvoie le type" return self.__Type - def appliedTo(self, xValue, HValue = None, argsAsSerie = False): + def appliedTo(self, xValue, HValue = None, argsAsSerie = False, 
returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à une série d'arguments xValue. Cette méthode se contente d'appliquer, chaque @@ -203,13 +205,13 @@ class Operator(object): # if _HValue is not None: assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue" - HxValue = [] + _HxValue = [] for i in range(len(_HValue)): - HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T ) + _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T ) if self.__AvoidRC: - Operator.CM.storeValueInX(_xValue[i],HxValue[-1],self.__name) + Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name) else: - HxValue = [] + _HxValue = [] _xserie = [] _hindex = [] for i, xv in enumerate(_xValue): @@ -224,13 +226,14 @@ class Operator(object): else: if self.__Matrix is not None: self.__addOneMatrixCall() - _hv = self.__Matrix * xv + _xv = numpy.matrix(numpy.ravel(xv)).T + _hv = self.__Matrix * _xv else: self.__addOneMethodCall() _xserie.append( xv ) _hindex.append( i ) _hv = None - HxValue.append( _hv ) + _HxValue.append( _hv ) # if len(_xserie)>0 and self.__Matrix is None: if self.__extraArgs is None: @@ -242,14 +245,17 @@ class Operator(object): for i in _hindex: _xv = _xserie.pop(0) _hv = _hserie.pop(0) - HxValue[i] = _hv + _HxValue[i] = _hv if self.__AvoidRC: Operator.CM.storeValueInX(_xv,_hv,self.__name) # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) + # + if argsAsSerie: return _HxValue + else: return _HxValue[-1] - def appliedControledFormTo(self, paires, argsAsSerie = False): + def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à des paires (xValue, uValue). 
Cette méthode se contente d'appliquer, son @@ -266,30 +272,33 @@ class Operator(object): PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" ) # if self.__Matrix is not None: - HxValue = [] + _HxValue = [] for paire in _xuValue: _xValue, _uValue = paire + _xValue = numpy.matrix(numpy.ravel(_xValue)).T self.__addOneMatrixCall() - HxValue.append( self.__Matrix * _xValue ) + _HxValue.append( self.__Matrix * _xValue ) else: - HxValue = [] + _xuArgs = [] for paire in _xuValue: - _xuValue = [] _xValue, _uValue = paire if _uValue is not None: - _xuValue.append( paire ) + _xuArgs.append( paire ) else: - _xuValue.append( _xValue ) - self.__addOneMethodCall( len(_xuValue) ) + _xuArgs.append( _xValue ) + self.__addOneMethodCall( len(_xuArgs) ) if self.__extraArgs is None: - HxValue = self.__Method( _xuValue ) # Calcul MF + _HxValue = self.__Method( _xuArgs ) # Calcul MF else: - HxValue = self.__Method( _xuValue, self.__extraArgs ) # Calcul MF + _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF + # + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if argsAsSerie: return _HxValue + else: return _HxValue[-1] - def appliedInXTo(self, paires, argsAsSerie = False): + def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False): """ Permet de restituer le résultat de l'application de l'opérateur à une série d'arguments xValue, sachant que l'opérateur est valable en @@ -310,20 +319,24 @@ class Operator(object): PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" ) # if self.__Matrix is not None: - HxValue = [] + _HxValue = [] for paire in _nxValue: _xNominal, _xValue = paire + _xValue = numpy.matrix(numpy.ravel(_xValue)).T self.__addOneMatrixCall() - HxValue.append( self.__Matrix * _xValue ) + _HxValue.append( self.__Matrix * _xValue ) else: self.__addOneMethodCall( len(_nxValue) ) if self.__extraArgs is None: - HxValue = self.__Method( _nxValue ) # Calcul MF + _HxValue = self.__Method( _nxValue ) # Calcul MF else: - HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF + _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF + # + if returnSerieAsArrayMatrix: + _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1) # - if argsAsSerie: return HxValue - else: return HxValue[-1] + if argsAsSerie: return _HxValue + else: return _HxValue[-1] def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False): """ @@ -511,6 +524,7 @@ class FullOperator(object): centeredDF = __Function["CenteredFiniteDifference"], increment = __Function["DifferentialIncrement"], dX = __Function["withdX"], + extraArguments = self.__extraArgs, avoidingRedundancy = __Function["withAvoidingRedundancy"], toleranceInRedundancy = __Function["withToleranceInRedundancy"], lenghtOfRedundancy = __Function["withLenghtOfRedundancy"], @@ -610,6 +624,7 @@ class Algorithm(object): - OMB : Observation moins Background : Y - Xb - ForecastState : état prédit courant lors d'itérations - Residu : dans le cas des algorithmes de vérification + - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche - SigmaObs2 : indicateur de correction optimale des erreurs d'observation - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche @@ -647,7 +662,8 @@ class Algorithm(object): 
self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum") self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo") self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum") - self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber") + self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState") + self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber") self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum") self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState") self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState") @@ -666,6 +682,7 @@ class Algorithm(object): self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA") self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB") self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu") + self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles") self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2") self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2") self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground") @@ -746,11 +763,31 @@ class Algorithm(object): __test_ovalue( EM, "EM", "Evolution", "M" ) __test_ovalue( CM, "CM", "Control Model", "C" ) # + # Corrections et compléments des bornes if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0): logging.debug("%s Prise en compte des bornes effectuee"%(self._name,)) else: self._parameters["Bounds"] = None # + # Corrections et compléments de l'initialisation en X + if "InitializationPoint" in self._parameters: + if Xb is not None: + if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'): + if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size: + raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \ + %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size)) + # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"]) + else: + self._parameters["InitializationPoint"] = numpy.ravel(Xb) + else: + if self._parameters["InitializationPoint"] is None: + raise ValueError("Forced initial point can not be set without any given Background or required value") + # + # Correction pour pallier a un bug de TNC sur le retour du Minimum + if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC": + self.setParameterValue("StoreInternalVariables",True) + # + # Verbosité et logging if logging.getLogger().level < logging.WARNING: self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1 if PlatformInfo.has_scipy: @@ -842,7 +879,7 @@ class Algorithm(object): """ raise NotImplementedError("Mathematical assimilation calculation has not been implemented!") - def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None): + def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, 
minval = None, maxval = None, listval = None, listadv = None): """ Permet de définir dans l'algorithme des paramètres requis et leurs caractéristiques par défaut. @@ -856,6 +893,7 @@ class Algorithm(object): "minval" : minval, "maxval" : maxval, "listval" : listval, + "listadv" : listadv, "message" : message, } self.__canonical_parameter_name[name.lower()] = name @@ -881,6 +919,7 @@ class Algorithm(object): minval = self.__required_parameters[__k]["minval"] maxval = self.__required_parameters[__k]["maxval"] listval = self.__required_parameters[__k]["listval"] + listadv = self.__required_parameters[__k]["listadv"] # if value is None and default is None: __val = None @@ -899,12 +938,14 @@ class Algorithm(object): raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval)) if maxval is not None and (numpy.array(__val, float) > maxval).any(): raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval)) - if listval is not None: + if listval is not None or listadv is not None: if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple): for v in __val: - if v not in listval: + if listval is not None and v in listval: continue + elif listadv is not None and v in listadv: continue + else: raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval)) - elif __val not in listval: + elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv): raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval)) # return __val @@ -963,6 +1004,20 @@ class Algorithm(object): self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time return self.__cpu_time, self.__elapsed_time + def _StopOnTimeLimit(self, X=None, withReason=False): + "Stop criteria on time limit: True/False [+ Reason]" + c, e = self._getTimeState() + if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]: + __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"]) + elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]: + __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"]) + else: + __SC, __SR = False, "" + if withReason: + return __SC, __SR + else: + return __SC + # ============================================================================== class AlgorithmAndParameters(object): """ @@ -1705,15 +1760,20 @@ class Covariance(object): __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject # if __Scalar is not None: - if numpy.matrix(__Scalar).size != 1: - raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size) + if isinstance(__Scalar, str): + __Scalar = __Scalar.replace(";"," ").replace(","," ").split() + if len(__Scalar) > 0: __Scalar = __Scalar[0] + if numpy.array(__Scalar).size != 1: + raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. 
Please check your scalar input.'%numpy.array(__Scalar).size) self.__is_scalar = True self.__C = numpy.abs( float(__Scalar) ) self.shape = (0,0) self.size = 0 elif __Vector is not None: + if isinstance(__Vector, str): + __Vector = __Vector.replace(";"," ").replace(","," ").split() self.__is_vector = True - self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) ) + self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) ) self.shape = (self.__C.size,self.__C.size) self.size = self.__C.size**2 elif __Matrix is not None: @@ -1724,7 +1784,7 @@ class Covariance(object): elif __Object is not None: self.__is_object = True self.__C = __Object - for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"): + for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"): if not hasattr(self.__C,at): raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at)) if hasattr(self.__C,"shape"): @@ -1783,12 +1843,12 @@ class Covariance(object): def getI(self): "Inversion" if self.ismatrix(): - return Covariance(self.__name+"I", asCovariance = self.__C.I ) + return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) ) elif self.isvector(): return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C ) elif self.isscalar(): return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C ) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"getI"): return Covariance(self.__name+"I", asCovObject = self.__C.getI() ) else: return None # Indispensable @@ -1801,8 +1861,10 @@ class Covariance(object): return Covariance(self.__name+"T", asEyeByVector = self.__C ) elif self.isscalar(): return Covariance(self.__name+"T", asEyeByScalar = self.__C ) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"getT"): return Covariance(self.__name+"T", asCovObject = self.__C.getT() ) + else: + raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,)) def cholesky(self): "Décomposition de Cholesky" @@ -1814,41 +1876,49 @@ class Covariance(object): return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) ) elif self.isobject() and hasattr(self.__C,"cholesky"): return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() ) + else: + raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,)) def choleskyI(self): "Inversion de la décomposition de Cholesky" if self.ismatrix(): - return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I ) + return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) ) elif self.isvector(): return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) ) elif self.isscalar(): return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) ) elif self.isobject() and hasattr(self.__C,"choleskyI"): return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() ) + else: + raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,)) def sqrtm(self): "Racine carrée matricielle" if self.ismatrix(): import scipy - return Covariance(self.__name+"C", asCovariance = scipy.linalg.sqrtm(self.__C) ) + return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) ) elif self.isvector(): return 
Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) ) elif self.isscalar(): return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) ) - elif self.isobject() and hasattr(self.__C,"sqrt"): - return Covariance(self.__name+"C", asCovObject = self.__C.sqrt() ) + elif self.isobject() and hasattr(self.__C,"sqrtm"): + return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() ) + else: + raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,)) def sqrtmI(self): "Inversion de la racine carrée matricielle" if self.ismatrix(): import scipy - return Covariance(self.__name+"H", asCovariance = scipy.linalg.sqrtm(self.__C).I ) + return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) ) elif self.isvector(): return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) ) elif self.isscalar(): return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) ) - elif self.isobject() and hasattr(self.__C,"sqrtI"): - return Covariance(self.__name+"H", asCovObject = self.__C.sqrtI() ) + elif self.isobject() and hasattr(self.__C,"sqrtmI"): + return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() ) + else: + raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,)) def diag(self, msize=None): "Diagonale de la matrice" @@ -1861,22 +1931,10 @@ class Covariance(object): raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) else: return self.__C * numpy.ones(int(msize)) - elif self.isobject(): + elif self.isobject() and hasattr(self.__C,"diag"): return self.__C.diag() - - def asfullmatrix(self, msize=None): - "Matrice pleine" - if self.ismatrix(): - return self.__C - elif self.isvector(): - return numpy.matrix( numpy.diag(self.__C), float ) - elif self.isscalar(): - if msize is None: - raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) - else: - return numpy.matrix( self.__C * numpy.eye(int(msize)), float ) - elif self.isobject() and hasattr(self.__C,"asfullmatrix"): - return self.__C.asfullmatrix() + else: + raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,)) def trace(self, msize=None): "Trace de la matrice" @@ -1891,6 +1949,28 @@ class Covariance(object): return self.__C * int(msize) elif self.isobject(): return self.__C.trace() + else: + raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,)) + + def asfullmatrix(self, msize=None): + "Matrice pleine" + if self.ismatrix(): + return numpy.asarray(self.__C) + elif self.isvector(): + return numpy.asarray( numpy.diag(self.__C), float ) + elif self.isscalar(): + if msize is None: + raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,)) + else: + return numpy.asarray( self.__C * numpy.eye(int(msize)), float ) + elif self.isobject() and hasattr(self.__C,"asfullmatrix"): + return self.__C.asfullmatrix() + else: + raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,)) + + def assparsematrix(self): + "Valeur sparse" + return self.__C def getO(self): return self @@ -1933,6 +2013,36 @@ class Covariance(object): "x.__neg__() <==> -x" return - self.__C + def __matmul__(self, other): + "x.__mul__(y) <==> x@y" + if 
self.ismatrix() and isinstance(other, (int, float)): + return numpy.asarray(self.__C) * other + elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.ravel(self.__C @ numpy.ravel(other)) + elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice + return numpy.asarray(self.__C) @ numpy.asarray(other) + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name)) + elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.ravel(self.__C) * numpy.ravel(other) + elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice + return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other) + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name)) + elif self.isscalar() and isinstance(other,numpy.matrix): + return numpy.asarray(self.__C * other) + elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)): + if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1: + return self.__C * numpy.ravel(other) + else: + return self.__C * numpy.asarray(other) + elif self.isobject(): + return self.__C.__matmul__(other) + else: + raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other))) + def __mul__(self, other): "x.__mul__(y) <==> x*y" if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)): @@ -1963,6 +2073,31 @@ class Covariance(object): else: raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other))) + def __rmatmul__(self, other): + "x.__rmul__(y) <==> y@x" + if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)): + return other * self.__C + elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)): + if numpy.ravel(other).size == self.shape[1]: # Vecteur + return numpy.asmatrix(numpy.ravel(other)) * self.__C + elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice + return numpy.asmatrix(other) * self.__C + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name)) + elif self.isvector() and isinstance(other,numpy.matrix): + if numpy.ravel(other).size == self.shape[0]: # Vecteur + return numpy.asmatrix(numpy.ravel(other) * self.__C) + elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice + return numpy.asmatrix(numpy.array(other) * self.__C) + else: + raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name)) + elif self.isscalar() and isinstance(other,numpy.matrix): + return other * self.__C + elif self.isobject(): + return self.__C.__rmatmul__(other) + else: + raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other))) + def __rmul__(self, other): "x.__rmul__(y) <==> y*x" if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)): @@ -2088,14 +2223,7 @@ def MultiFonction( # # Calculs effectifs if __mpEnabled: - _jobs = [] - if _extraArguments is None: - _jobs = __xserie - elif _extraArguments is not None 
and isinstance(_extraArguments, (list, tuple, map)): - for __xvalue in __xserie: - _jobs.append( [__xvalue, ] + list(_extraArguments) ) - else: - raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),)) + _jobs = __xserie # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),)) import multiprocessing with multiprocessing.Pool(__mpWorkers) as pool:
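
# --- Illustrative sketch, not part of the patch above ---------------------------
# A minimal, self-contained NumPy example of two behaviours introduced by this
# change set: (1) the column stacking performed when "returnSerieAsArrayMatrix" is
# True in Operator.appliedTo / appliedControledFormTo / appliedInXTo, and (2) the
# diagonal (vector-stored) case of Covariance.__matmul__. The series "HxSerie",
# the diagonal "Cdiag" and the matrix "M" are arbitrary values chosen for the demo.
import numpy

# (1) A series of results is stacked column-wise, one column per evaluated point,
#     exactly as in numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1).
HxSerie = [numpy.array([1., 2.]), numpy.array([3., 4.]), numpy.array([5., 6.])]
HxArray = numpy.stack([numpy.ravel(hv) for hv in HxSerie], axis=1)
assert HxArray.shape == (2, 3)                 # 2 components, 3 evaluated points
assert numpy.all(HxArray[:, 1] == numpy.array([3., 4.]))

# (2) For a covariance stored as its diagonal, C @ M is evaluated by broadcasting
#     numpy.ravel(C).reshape((-1, 1)) * M, which matches the full diag(C) @ M.
Cdiag = numpy.array([1., 2., 4.])              # stored diagonal of C
M = numpy.arange(6., dtype=float).reshape((3, 2))
assert numpy.allclose(Cdiag.reshape((-1, 1)) * M, numpy.diag(Cdiag) @ M)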