1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
# NOTE(review): this listing is a sampled extract — the line numbers embedded in
# the text show gaps, so several statements (returns, continue/break, else
# branches) are not visible here.  Comments below describe only what the
# visible lines establish.
41 class CacheManager(object):
43 Classe générale de gestion d'un cache de calculs
# (General management class for a cache of operator computations, used to
# avoid redundant evaluations of the same point.)
# Tolerance/length settings for redundancy detection; the historical spelling
# "lenght" is kept as-is since it is part of the public keyword interface.
46 toleranceInRedundancy = 1.e-18,
47 lenghtOfRedundancy = -1,
50 Les caractéristiques de tolérance peuvent être modifiées à la création.
# Store the relative tolerance used to decide two points are identical, and
# the maximum cache length (-1 means "auto-size on first store", see
# storeValueInX below).  __initlnOR keeps the initial length for reset.
52 self.__tolerBP = float(toleranceInRedundancy)
53 self.__lenghtOR = int(lenghtOfRedundancy)
54 self.__initlnOR = self.__lenghtOR
# Look backward (most recent entry first) through the cache for a point equal
# to xValue within tolerance, computed by the operator named oName.  Each
# cache entry self.__listOPCV[i] is a tuple
#   (point as 1D array, value, norm of point, operator name)
# as built in storeValueInX.
64 def wasCalculatedIn(self, xValue, oName="" ):
65 "Vérifie l'existence d'un calcul correspondant à la valeur"
69 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Cheap rejection tests before paying for a full norm: argument without a
# 'size' attribute, different operator name, different vector size, or a
# first component already outside the tolerance band.
70 if not hasattr(xValue, 'size'):
72 elif (str(oName) != self.__listOPCV[i][3]):
74 elif (xValue.size != self.__listOPCV[i][0].size):
76 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: points match when the norm of the difference is below the
# tolerance scaled by the stored norm; the cached value is then reused.
78 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
80 __HxV = self.__listOPCV[i][1]
# Store, for the operator named oName, the value HxValue computed at xValue.
84 def storeValueInX(self, xValue, HxValue, oName="" ):
85 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
# Auto-size the cache on first use: room for 2*min(size,50)+2 entries for
# this operator name (the commented alternative was 2*size+2).
86 if self.__lenghtOR < 0:
87 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
88 self.__initlnOR = self.__lenghtOR
89 self.__seenNames.append(str(oName))
# Extend the cache capacity each time a new operator name is seen.
90 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
91 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
92 self.__initlnOR += self.__lenghtOR
93 self.__seenNames.append(str(oName))
# FIFO eviction: drop the oldest entries until the cache fits the limit.
# (list.pop(0) is O(n); acceptable here given the bounded cache size.)
94 while len(self.__listOPCV) > self.__lenghtOR:
95 self.__listOPCV.pop(0)
96 self.__listOPCV.append( (
97 copy.copy(numpy.ravel(xValue)), # 0 Previous point
98 copy.copy(HxValue), # 1 Previous value
99 numpy.linalg.norm(xValue), # 2 Norm
100 str(oName), # 3 Operator name
# Disable/enable fragments (method headers are on elided lines): disabling
# clears the enabled flag; enabling restores the initial cache length and
# sets the flag back.
105 self.__initlnOR = self.__lenghtOR
107 self.__enabled = False
111 self.__lenghtOR = self.__initlnOR
112 self.__enabled = True
114 # ==============================================================================
# NOTE(review): sampled extract — embedded line numbers show elided statements
# (else branches, returns, method headers).  Comments describe visible lines only.
115 class Operator(object):
117 Classe générale d'interface de type opérateur simple
# (General interface class for a simple operator, backed either by a Python
# function/multi-function or by a matrix.  Class-level call counters and the
# shared cache Operator.CM are referenced below but declared on elided lines.)
125 name = "GenericOperator",
128 avoidingRedundancy = True,
129 reducingMemoryUse = False,
130 inputAsMultiFunction = False,
131 enableMultiProcess = False,
132 extraArguments = None,
135 On construit un objet de ce type en fournissant, à l'aide de l'un des
136 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
139 - name : nom d'opérateur
140 - fromMethod : argument de type fonction Python
141 - fromMatrix : argument adapté au constructeur numpy.array/matrix
142 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
143 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
145 - inputAsMultiFunction : booléen indiquant une fonction explicitement
146 définie (ou pas) en multi-fonction
147 - extraArguments : arguments supplémentaires passés à la fonction de
148 base et ses dérivées (tuple ou dictionnaire)
# Per-instance call counters (matrix calls, method calls, cache hits).
150 self.__name = str(name)
151 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
152 self.__reduceM = bool( reducingMemoryUse )
153 self.__avoidRC = bool( avoidingRedundancy )
154 self.__inputAsMF = bool( inputAsMultiFunction )
155 self.__mpEnabled = bool( enableMultiProcess )
156 self.__extraArgs = extraArguments
# Three construction modes: a user multi-function used directly; a plain
# function wrapped by MultiFonction (partial binds the wrapped function and
# the multiprocessing flag); or a matrix (string form parsed first).
157 if fromMethod is not None and self.__inputAsMF:
158 self.__Method = fromMethod # logtimer(fromMethod)
160 self.__Type = "Method"
161 elif fromMethod is not None and not self.__inputAsMF:
162 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
164 self.__Type = "Method"
165 elif fromMatrix is not None:
167 if isinstance(fromMatrix, str):
168 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
169 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
170 self.__Type = "Matrix"
# Toggle the shared redundancy cache for all operators.
# NOTE(review): enableAvoidingRedundancy also shows a CM.disable() call here;
# its CM.enable() branch is presumably on elided lines — confirm in full file.
176 def disableAvoidingRedundancy(self):
178 Operator.CM.disable()
180 def enableAvoidingRedundancy(self):
185 Operator.CM.disable()
# Apply the operator to a series of arguments xValue; HValue may supply
# precomputed values which are stored in the cache instead of recomputed.
191 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
193 Permet de restituer le résultat de l'application de l'opérateur à une
194 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
195 argument devant a priori être du bon type.
197 - les arguments par série sont :
198 - xValue : argument adapté pour appliquer l'opérateur
199 - HValue : valeur précalculée de l'opérateur en ce point
200 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
207 if HValue is not None:
211 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
# Precomputed-values path: sizes must match, each value is kept and cached.
213 if _HValue is not None:
214 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
216 for i in range(len(_HValue)):
217 _HxValue.append( _HValue[i] )
219 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Computation path: first consult the cache per point, then either reuse the
# cached value, apply the matrix directly, or defer to the (multi-)function.
224 for i, xv in enumerate(_xValue):
226 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
228 __alreadyCalculated = False
230 if __alreadyCalculated:
231 self.__addOneCacheCall()
234 if self.__Matrix is not None:
235 self.__addOneMatrixCall()
236 _hv = self.__Matrix @ numpy.ravel(xv)
238 self.__addOneMethodCall()
242 _HxValue.append( _hv )
# Batched multi-function evaluation of the points not resolved above; the
# result must be a sequence (checked via the 'pop' attribute).
244 if len(_xserie)>0 and self.__Matrix is None:
245 if self.__extraArgs is None:
246 _hserie = self.__Method( _xserie ) # Calcul MF
248 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
249 if not hasattr(_hserie, "pop"):
250 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
256 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series as a 2D array with one column per result.
258 if returnSerieAsArrayMatrix:
259 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
261 if argsAsSerie: return _HxValue
262 else: return _HxValue[-1]
# Apply the operator to (xValue, uValue) pairs; if uValue is None the
# operator is applied to xValue alone.
264 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
266 Permet de restituer le résultat de l'application de l'opérateur à des
267 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
268 argument devant a priori être du bon type. Si la uValue est None,
269 on suppose que l'opérateur ne s'applique qu'à xValue.
271 - paires : les arguments par paire sont :
272 - xValue : argument X adapté pour appliquer l'opérateur
273 - uValue : argument U adapté pour appliquer l'opérateur
274 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
276 if argsAsSerie: _xuValue = paires
277 else: _xuValue = (paires,)
278 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control uValue is ignored, only X is multiplied.
280 if self.__Matrix is not None:
282 for paire in _xuValue:
283 _xValue, _uValue = paire
284 self.__addOneMatrixCall()
285 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: pass the full pair when U is present, else X alone.
288 for paire in _xuValue:
289 _xValue, _uValue = paire
290 if _uValue is not None:
291 _xuArgs.append( paire )
293 _xuArgs.append( _xValue )
294 self.__addOneMethodCall( len(_xuArgs) )
295 if self.__extraArgs is None:
296 _HxValue = self.__Method( _xuArgs ) # Calcul MF
298 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
300 if returnSerieAsArrayMatrix:
301 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
303 if argsAsSerie: return _HxValue
304 else: return _HxValue[-1]
# Apply the operator, defined at nominal point xNominal, to xValue pairs.
# A matrix operator is linear, hence valid at any nominal point.
306 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
308 Permet de restituer le résultat de l'application de l'opérateur à une
309 série d'arguments xValue, sachant que l'opérateur est valable en
310 xNominal. Cette méthode se contente d'appliquer, son argument devant a
311 priori être du bon type. Si l'opérateur est linéaire car c'est une
312 matrice, alors il est valable en tout point nominal et xNominal peut
313 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
314 permet d'indiquer que l'argument est multi-paires.
316 - paires : les arguments par paire sont :
317 - xNominal : série d'arguments permettant de donner le point où
318 l'opérateur est construit pour être ensuite appliqué
319 - xValue : série d'arguments adaptés pour appliquer l'opérateur
320 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
322 if argsAsSerie: _nxValue = paires
323 else: _nxValue = (paires,)
324 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
326 if self.__Matrix is not None:
328 for paire in _nxValue:
329 _xNominal, _xValue = paire
330 self.__addOneMatrixCall()
331 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
333 self.__addOneMethodCall( len(_nxValue) )
334 if self.__extraArgs is None:
335 _HxValue = self.__Method( _nxValue ) # Calcul MF
337 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
339 if returnSerieAsArrayMatrix:
340 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
342 if argsAsSerie: return _HxValue
343 else: return _HxValue[-1]
# Return the operator in matrix form; for a function-backed operator an
# operating point (ValueForMethodForm) is required, the sentinel string
# "UnknownVoidValue" meaning "not provided" (None cannot be used as sentinel).
345 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
347 Permet de renvoyer l'opérateur sous la forme d'une matrice
349 if self.__Matrix is not None:
350 self.__addOneMatrixCall()
351 mValue = [self.__Matrix,]
352 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
355 self.__addOneMethodCall( len(ValueForMethodForm) )
356 for _vfmf in ValueForMethodForm:
357 mValue.append( self.__Method(((_vfmf, None),)) )
359 self.__addOneMethodCall()
360 mValue = self.__Method(((ValueForMethodForm, None),))
362 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
364 if argsAsSerie: return mValue
365 else: return mValue[-1]
# Shape is only defined for the matrix form.
369 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
370 la forme d'une matrice
372 if self.__Matrix is not None:
373 return self.__Matrix.shape
375 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Return the evaluation counters: per-instance then class-wide totals
# (total, matrix, method, cached), selectable by index via 'which'.
377 def nbcalls(self, which=None):
379 Renvoie les nombres d'évaluations de l'opérateur
382 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
383 self.__NbCallsAsMatrix,
384 self.__NbCallsAsMethod,
385 self.__NbCallsOfCached,
386 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
387 Operator.NbCallsAsMatrix,
388 Operator.NbCallsAsMethod,
389 Operator.NbCallsOfCached,
391 if which is None: return __nbcalls
392 else: return __nbcalls[which]
# Counter helpers: each records one (or nb) call(s) both on the instance
# and on the class-wide totals.
394 def __addOneMatrixCall(self):
395 "Comptabilise un appel"
396 self.__NbCallsAsMatrix += 1 # Decompte local
397 Operator.NbCallsAsMatrix += 1 # Decompte global
399 def __addOneMethodCall(self, nb = 1):
400 "Comptabilise un appel"
401 self.__NbCallsAsMethod += nb # Decompte local
402 Operator.NbCallsAsMethod += nb # Decompte global
404 def __addOneCacheCall(self):
405 "Comptabilise un appel"
406 self.__NbCallsOfCached += 1 # Decompte local
407 Operator.NbCallsOfCached += 1 # Decompte global
409 # ==============================================================================
# NOTE(review): sampled extract — embedded line numbers show elided statements.
# Comments describe visible lines only.
410 class FullOperator(object):
412 Classe générale d'interface de type opérateur complet
413 (Direct, Linéaire Tangent, Adjoint)
# (Builds the triple of Operator objects {Direct, Tangent, Adjoint} from a
# matrix, one function (with finite-difference derivatives), or three
# functions, possibly loaded from a script.)
416 name = "GenericFullOperator",
418 asOneFunction = None, # 1 Fonction
419 asThreeFunctions = None, # 3 Fonctions in a dictionary
420 asScript = None, # 1 or 3 Fonction(s) by script
421 asDict = None, # Parameters
423 extraArguments = None,
424 performancePrf = None,
425 inputAsMF = False,# Fonction(s) as Multi-Functions
430 self.__name = str(name)
431 self.__check = bool(toBeChecked)
432 self.__extraArgs = extraArguments
# Normalize the parameter dictionary; multiprocessing of derivatives takes
# priority and excludes multiprocessing of the evaluation itself.
437 if (asDict is not None) and isinstance(asDict, dict):
438 __Parameters.update( asDict )
439 # Priorité à EnableMultiProcessingInDerivatives=True
440 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
441 __Parameters["EnableMultiProcessingInDerivatives"] = True
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInDerivatives" not in __Parameters:
444 __Parameters["EnableMultiProcessingInDerivatives"] = False
445 if __Parameters["EnableMultiProcessingInDerivatives"]:
446 __Parameters["EnableMultiProcessingInEvaluation"] = False
447 if "EnableMultiProcessingInEvaluation" not in __Parameters:
448 __Parameters["EnableMultiProcessingInEvaluation"] = False
449 if "withIncrement" in __Parameters: # Temporaire
450 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
451 # Le défaut est équivalent à "ReducedOverallRequirements"
# Performance profile selects the (memory-reduction, redundancy-avoidance)
# pair; the default True/True matches "ReducedOverallRequirements".
452 __reduceM, __avoidRC = True, True
453 if performancePrf is not None:
454 if performancePrf == "ReducedAmountOfCalculation":
455 __reduceM, __avoidRC = False, True
456 elif performancePrf == "ReducedMemoryFootprint":
457 __reduceM, __avoidRC = True, False
458 elif performancePrf == "NoSavings":
459 __reduceM, __avoidRC = False, False
# Script source: import either a matrix, a single DirectOperator (with
# approximated derivatives), or the Direct/Tangent/Adjoint triple.
461 if asScript is not None:
462 __Matrix, __Function = None, None
464 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
466 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
467 __Function.update({"useApproximatedDerivatives":True})
468 __Function.update(__Parameters)
469 elif asThreeFunctions:
471 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
472 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
473 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
475 __Function.update(__Parameters)
# Direct (non-script) source: a single function implies finite-difference
# approximated derivatives; three functions are taken as given.
478 if asOneFunction is not None:
479 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
480 if asOneFunction["Direct"] is not None:
481 __Function = asOneFunction
483 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
485 __Function = { "Direct":asOneFunction }
486 __Function.update({"useApproximatedDerivatives":True})
487 __Function.update(__Parameters)
488 elif asThreeFunctions is not None:
489 if isinstance(asThreeFunctions, dict) and \
490 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
491 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
492 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
493 __Function = asThreeFunctions
494 elif isinstance(asThreeFunctions, dict) and \
495 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
496 __Function = asThreeFunctions
497 __Function.update({"useApproximatedDerivatives":True})
499 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct defaults to the Tangent function.
500 if "Direct" not in asThreeFunctions:
501 __Function["Direct"] = asThreeFunctions["Tangent"]
502 __Function.update(__Parameters)
506 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
507 # for k in ("Direct", "Tangent", "Adjoint"):
508 # if k in __Function and hasattr(__Function[k],"__class__"):
509 # if type(__Function[k]) is type(self.__init__):
510 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the optional nominal-point value(s) into a dict keyed by "HXb".
512 if appliedInX is not None and isinstance(appliedInX, dict):
513 __appliedInX = appliedInX
514 elif appliedInX is not None:
515 __appliedInX = {"HXb":appliedInX}
519 if scheduledBy is not None:
520 self.__T = scheduledBy
# Case 1: single Direct function with approximated derivatives — fill the
# finite-difference defaults and delegate Tangent/Adjoint to FDApproximation.
522 if isinstance(__Function, dict) and \
523 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
524 ("Direct" in __Function) and (__Function["Direct"] is not None):
525 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
526 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
527 if "withdX" not in __Function: __Function["withdX"] = None
528 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
529 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
530 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
531 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
532 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
533 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
534 from daCore import NumericObjects
535 FDA = NumericObjects.FDApproximation(
537 Function = __Function["Direct"],
538 centeredDF = __Function["CenteredFiniteDifference"],
539 increment = __Function["DifferentialIncrement"],
540 dX = __Function["withdX"],
541 extraArguments = self.__extraArgs,
542 reducingMemoryUse = __Function["withReducingMemoryUse"],
543 avoidingRedundancy = __Function["withAvoidingRedundancy"],
544 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
545 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
546 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
547 mpWorkers = __Function["NumberOfProcesses"],
548 mfEnabled = __Function["withmfEnabled"],
550 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
551 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
552 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three functions are all provided explicitly.
553 elif isinstance(__Function, dict) and \
554 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
555 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
556 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
557 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
558 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: matrix form — Tangent is the matrix itself, Adjoint its transpose.
559 elif asMatrix is not None:
560 if isinstance(__Matrix, str):
561 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
562 __matrice = numpy.asarray( __Matrix, dtype=float )
563 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
564 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
565 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
568 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the nominal-point values as column vectors (string forms parsed first).
570 if __appliedInX is not None:
571 self.__FO["AppliedInX"] = {}
572 for key in __appliedInX:
573 if isinstance(__appliedInX[key], str):
574 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
575 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
577 self.__FO["AppliedInX"] = None
# Delegate textual representations to the internal operator dictionary.
583 "x.__repr__() <==> repr(x)"
584 return repr(self.__FO)
587 "x.__str__() <==> str(x)"
588 return str(self.__FO)
590 # ==============================================================================
591 class Algorithm(object):
593 Classe générale d'interface de type algorithme
595 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
596 d'assimilation, en fournissant un container (dictionnaire) de variables
597 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
599 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
601 def __init__(self, name):
603 L'initialisation présente permet de fabriquer des variables de stockage
604 disponibles de manière générique dans les algorithmes élémentaires. Ces
605 variables de stockage sont ensuite conservées dans un dictionnaire
606 interne à l'objet, mais auquel on accède par la méthode "get".
608 Les variables prévues sont :
609 - APosterioriCorrelations : matrice de corrélations de la matrice A
610 - APosterioriCovariance : matrice de covariances a posteriori : A
611 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
612 - APosterioriVariances : vecteur des variances de la matrice A
613 - Analysis : vecteur d'analyse : Xa
614 - BMA : Background moins Analysis : Xa - Xb
615 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
616 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
617 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
618 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
619 - CostFunctionJo : partie observations de la fonction-coût : Jo
620 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
621 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
622 - CurrentOptimum : état optimal courant lors d'itérations
623 - CurrentState : état courant lors d'itérations
624 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
625 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
626 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
627 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
628 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
629 - Innovation : l'innovation : d = Y - H(X)
630 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
631 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
632 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
633 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
634 - KalmanGainAtOptimum : gain de Kalman à l'optimum
635 - MahalanobisConsistency : indicateur de consistance des covariances
636 - OMA : Observation moins Analyse : Y - Xa
637 - OMB : Observation moins Background : Y - Xb
638 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
639 - ForecastState : état prédit courant lors d'itérations
640 - Residu : dans le cas des algorithmes de vérification
641 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
642 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
643 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
644 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
645 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
646 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
647 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
648 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
649 On peut rajouter des variables à stocker dans l'initialisation de
650 l'algorithme élémentaire qui va hériter de cette classe
652 logging.debug("%s Initialisation", str(name))
653 self._m = PlatformInfo.SystemUsage()
655 self._name = str( name )
656 self._parameters = {"StoreSupplementaryCalculations":[]}
657 self.__internal_state = {}
658 self.__required_parameters = {}
659 self.__required_inputs = {
660 "RequiredInputValues":{"mandatory":(), "optional":()},
661 "ClassificationTags":[],
663 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
664 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
665 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
666 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
668 self.StoredVariables = {}
669 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
670 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
671 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
672 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
673 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
674 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
675 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
676 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
677 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
678 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
679 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
680 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
681 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
682 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
683 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
684 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
685 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
686 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
687 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
688 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
689 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
690 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
691 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
692 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
693 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
694 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
695 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
696 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
697 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
698 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
699 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
700 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
701 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
702 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
703 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
704 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
705 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
706 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
707 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
708 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
709 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
710 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
711 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
713 for k in self.StoredVariables:
714 self.__canonical_stored_name[k.lower()] = k
716 for k, v in self.__variable_names_not_public.items():
717 self.__canonical_parameter_name[k.lower()] = k
718 self.__canonical_parameter_name["algorithm"] = "Algorithm"
719 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        # Common pre-run step of every algorithm: register the user parameters,
        # check the presence of each input (vectors, covariances, operators)
        # against the requirements declared with requireInputArguments, then
        # normalize optional settings (Bounds, InitializationPoint, a TNC
        # minimizer workaround, and verbosity flags).
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Update of the internal parameters with the content of Parameters,
        # taking back the default values for all those not explicitly defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        # Checks and completions of the input vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Presence check of one input vector: a missing mandatory vector
            # is a hard error, every other case is only logged.
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        # Checks and completions of the covariance matrices
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Same presence check for an error covariance matrix.
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Checks and completions of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Same presence check for an operator, given as a non-empty dict.
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        # Checks and completions of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Beware: unlike Bounds, no default to None, otherwise one can not be without bounds
        # Checks and completions of the X initialization
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
                self._parameters["InitializationPoint"] = numpy.ravel(Xb)
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")
        # Correction to work around a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    def _post_run(self,_oH=None):
        # Common post-run step: derive the requested a posteriori quantities
        # from each stored covariance, then log operator-call counts, memory
        # and timing statistics.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlations C = D^(-1/2) A D^(-1/2) with D = diag(A)
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            # nbcalls(0): number of real evaluations, nbcalls(3): cache hits
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
857 def _toStore(self, key):
858 "True if in StoreSupplementaryCalculations, else False"
859 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
        """
            # Named access is case-insensitive, via the canonical-name map
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            # No key: the whole dictionary of stored variables
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Case-insensitive membership test on the stored-variable names.
        if key is None or key.lower() not in self.__canonical_stored_name:
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        "D.keys() -> list of D's keys"
        # Guarded access: StoredVariables may not exist yet (presumably during
        # unpickling, as for pop below — TODO confirm against callers).
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Remove a stored variable by (case-insensitive) name, returning d as
        # the default when the canonical name is absent.
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                # Mimic dict.pop error behaviour for a missing key
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Doit implémenter l'opération élémentaire de calcul d'assimilation sous
        sa forme mathématique la plus naturelle possible.
        """
        # Abstract entry point: every concrete algorithm must override run().
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None, oldname = None):
        """
        Permet de définir dans l'algorithme des paramètres requis et leurs
        caractéristiques par défaut.
        """
            raise ValueError("A name is mandatory to define a required parameter.")
        # Register the parameter characteristics used later for validation
        # (default value, typecast, min/max bounds, allowed value lists...)
        self.__required_parameters[name] = {
            "typecast" : typecast,
        # Case-insensitive canonical name, plus the deprecated-name mapping
        self.__canonical_parameter_name[name.lower()] = name
        if oldname is not None:
            self.__canonical_parameter_name[oldname.lower()] = name # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Renvoie la liste des noms de paramètres requis ou directement le
        dictionnaire des paramètres requis.
        """
            # Names only, sorted, when no details are requested
            return sorted(self.__required_parameters.keys())
            # Full dictionary of parameter characteristics otherwise
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Renvoie la valeur d'un paramètre requis de manière contrôlée
        """
        # Resolve the canonical (case-insensitive) name, then fetch the
        # declared characteristics of this required parameter.
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # Choose the effective value: explicit value, declared default, or None
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Validate the chosen value against bounds and allowed-value lists
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    # Each element must belong to the public or advanced list
                    if listval is not None and v in listval: continue
                    elif listadv is not None and v in listadv: continue
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
985 def requireInputArguments(self, mandatory=(), optional=()):
987 Permet d'imposer des arguments de calcul requis en entrée.
989 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
990 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
992 def getInputArguments(self):
994 Permet d'obtenir les listes des arguments de calcul requis en entrée.
996 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
998 def setAttributes(self, tags=()):
1000 Permet d'adjoindre des attributs comme les tags de classification.
1001 Renvoie la liste actuelle dans tous les cas.
1003 self.__required_inputs["ClassificationTags"].extend( tags )
1004 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        """
        self._parameters.update( fromDico )
        # Map each canonical parameter name back to the key actually used by
        # the caller (case-insensitive match).
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # Warn once per deprecated parameter name used by the caller
        for k in __inverse_fromDico_keys.values():
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter '%s' used in '%s' algorithm case is deprecated and has to be replaced by '%s'. Please update your code."%(k,self._name,__newk)
                warnings.warn(__msg, FutureWarning, stacklevel=50)
        # Validate and store every declared parameter, from the caller value
        # when given, otherwise from its declared default.
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            # Long values are logged by length only, to keep the log readable
            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1036 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1038 Permet de stocker des variables nommées constituant l'état interne
1040 if reset: # Vide le dictionnaire préalablement
1041 self.__internal_state = {}
1042 if key is not None and value is not None:
1043 self.__internal_state[key] = value
1044 self.__internal_state.update( dict(fromDico) )
1046 def _getInternalState(self, key=None):
1048 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1050 if key is not None and key in self.__internal_state:
1051 return self.__internal_state[key]
1053 return self.__internal_state
    def _getTimeState(self, reset=False):
        """
        Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
        """
            # Reset: record the reference CPU and wall-clock instants
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            # Otherwise: elapsed (cpu, wall) seconds since the last reset
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # Compares the current CPU/elapsed durations with the user limits,
        # when the MaximumCpuTime/MaximumElapsedTime parameters are set.
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
1082 # ==============================================================================
class PartialAlgorithm(object):
    """
    Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
    action avancée comme la vérification . Pour les méthodes reprises ici,
    le fonctionnement est identique à celles de la classe "Algorithm".
    """
    def __init__(self, name):
        # Minimal storage-only construction: a name, the supplementary
        # calculation selection, and the always-present stored variables.
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        # Case-insensitive access map: lowercased name -> canonical name
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
        """
            # Named, case-insensitive access to one stored variable
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            # No key: the whole dictionary of stored variables
            return self.StoredVariables
1122 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
    """
    # NOTE(review): the __init__ signature is only partially visible here; it
    # accepts at least name/asAlgorithm/asDict/asScript keyword arguments.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        # The algorithm choice can come from a user script or be given directly
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        "Mise a jour des parametres"
        # Parameters can come from a dict or be read from a user script file
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        # Unwrap every study object (getO when wrapped), validate shapes,
        # then run the selected algorithm with the stored parameters.
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        self.__shape_validate()
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        # Load and execute a YACS XML scheme inside a SALOME session.
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            # Best-effort catalog registration for the scheme file
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Consistency check of the scheme links before execution
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Look up first in the algorithm stored variables, then in the
        # parameters; without key, expose everything but the private names.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
    def pop(self, k, d):
        "Necessaire pour le pickling"
        return self.__algorithm.pop(k, d)
    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Renvoie la liste des paramètres requis selon l'algorithme"
        return self.__algorithm.getRequiredParameters(noDetails)
    def getAlgorithmInputArguments(self):
        "Renvoie la liste des entrées requises selon l'algorithme"
        return self.__algorithm.getInputArguments()
    def getAlgorithmAttributes(self):
        "Renvoie la liste des attributs selon l'algorithme"
        # NOTE(review): reads the tags through setAttributes() with no
        # arguments, which appends nothing and returns the current list.
        return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer function __O (with parameters __I, scheduler
        # __S) to the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer function __O from the stored variable __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # True when the stored variable __V currently has a data observer.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # All public variable and parameter names
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
        """
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        # Explicitly search for the complete algorithm file
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        # Import the complete file as a module
        # ------------------------------------------
            # sys.path is temporarily extended, then restored after the import
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        # Instantiate an object of the elementary type of the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validation de la correspondance correcte des tailles des variables et
        des matrices s'il y en a.
        """
        # Normalize every input to a shape tuple (scalars/vectors: 1D size,
        # covariances and operators: 2D shape), then cross-check coherence.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        # Verification of the conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance and evolution matrices must be square
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-coherence of the observation operator with state/observation
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            if self.__algorithmName in ["EnsembleBlue",]:
                # EnsembleBlue special case: Xb is an ensemble of members,
                # converted here into a persistent vector of states
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background")
                for member in asPersistentVector:
                    self.__Xb.store( numpy.asarray(member, dtype=float) )
                # NOTE(review): scalar shape where a tuple is used elsewhere —
                # confirm intended against the full source
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        # Bounds, when given as a sequence, must have one pair per state component
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        if ("StateBoundsForQuantiles" in self.__P) \
            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1507 # ==============================================================================
# NOTE(review): elided excerpt of the class — the constructor header and some
# interior lines are missing from this view; visible lines kept byte-identical.
# Holds a parameters dictionary (self.__P) plus an "Algorithm" entry.
1508 class RegulationAndParameters(object):
# English: general action-interface class for regulation and its parameters.
1510 Classe générale d'interface d'action pour la régulation et ses paramètres
1513 name = "GenericRegulation",
1520 self.__name = str(name)
# Load the algorithm name from a script when not given directly.
1523 if asAlgorithm is None and asScript is not None:
1524 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1526 __Algo = asAlgorithm
# Load the parameters dictionary from a script when not given directly.
1528 if asDict is None and asScript is not None:
1529 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1533 if __Dict is not None:
1534 self.__P.update( dict(__Dict) )
1536 if __Algo is not None:
1537 self.__P.update( {"Algorithm":str(__Algo)} )
# Accessor returning the stored value for *key* (the no-key branch is elided).
1539 def get(self, key = None):
# English docstring: checks the existence of a variable or parameter key.
1540 "Vérifie l'existence d'une clé de variable ou de paramètres"
1542 return self.__P[key]
1546 # ==============================================================================
# NOTE(review): elided excerpt — constructor header and several interior lines
# are missing from this view; visible code kept byte-identical.
# Binds an observer callable (self.__O) to one or more variable names
# (self.__V) with associated info strings (self.__I) on an algorithm object.
1547 class DataObserver(object):
# English: general observer-type interface class.
1549 Classe générale d'interface de type observer
1552 name = "GenericObserver",
1564 self.__name = str(name)
# Normalize the target variable name(s) into tuples self.__V / self.__I.
1569 if onVariable is None:
1570 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1571 elif type(onVariable) in (tuple, list):
1572 self.__V = tuple(map( str, onVariable ))
1573 if withInfo is None:
1576 self.__I = (str(withInfo),)*len(self.__V)
1577 elif isinstance(onVariable, str):
1578 self.__V = (onVariable,)
1579 if withInfo is None:
1580 self.__I = (onVariable,)
1582 self.__I = (str(withInfo),)
1584 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Use the given observer object, or build one from template/string/script text.
1586 if asObsObject is not None:
1587 self.__O = asObsObject
1589 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1590 __Function = Observer2Func(__FunctionText)
1591 self.__O = __Function.getfunc()
# Attach the observer to each requested variable of the algorithm object.
1593 for k in range(len(self.__V)):
1596 if ename not in withAlgo:
1597 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1599 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# repr/str delegate to the observed variable names and the observer callable.
1602 "x.__repr__() <==> repr(x)"
1603 return repr(self.__V)+"\n"+repr(self.__O)
1606 "x.__str__() <==> str(x)"
1607 return str(self.__V)+"\n"+str(self.__O)
1609 # ==============================================================================
# NOTE(review): elided excerpt — constructor header and some branches are
# missing from this view; visible code kept byte-identical.
# Wraps a user-supplied script text (self.__F) resolved from, in priority
# order: a literal string, a named template, or an importable script file.
1610 class UserScript(object):
# English: general interface class for a user script given as text.
1612 Classe générale d'interface de type texte de script utilisateur
1615 name = "GenericUserScript",
1622 self.__name = str(name)
# Resolution priority: explicit string, then named template (per role), then script.
1624 if asString is not None:
1626 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1627 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1628 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1629 self.__F = Templates.ObserverTemplates[asTemplate]
1630 elif asScript is not None:
1631 self.__F = Interfaces.ImportFromScript(asScript).getstring()
# repr/str expose the resolved script text.
1636 "x.__repr__() <==> repr(x)"
1637 return repr(self.__F)
1640 "x.__str__() <==> str(x)"
1641 return str(self.__F)
1643 # ==============================================================================
# NOTE(review): elided excerpt — constructor header, updateParameters
# signature tail and get/keys branch lines are missing from this view;
# visible code kept byte-identical. Stores parameters in dict self.__P.
1644 class ExternalParameters(object):
# English: general interface class for externally supplied parameters.
1646 Classe générale d'interface de type texte de script utilisateur
1649 name = "GenericExternalParameters",
1655 self.__name = str(name)
1658 self.updateParameters( asDict, asScript )
# Merge parameters given directly or loaded from a script file.
1660 def updateParameters(self,
# English docstring: update of the parameters.
1664 "Mise a jour des parametres"
1665 if asDict is None and asScript is not None:
1666 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1670 if __Dict is not None:
1671 self.__P.update( dict(__Dict) )
# get(key) returns one value; without a key it returns the key list (the
# branching lines between these returns are elided from this view).
1673 def get(self, key = None):
1675 return self.__P[key]
1677 return list(self.__P.keys())
1680 return list(self.__P.keys())
def pop(self, k, d):
    "Remove parameter *k* from the store and return its value, or *d* when absent."
    removed = self.__P.pop(k, d)
    return removed
# Fragment of the elided items() accessor: exposes the parameter dict items.
1686 return self.__P.items()
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # Membership is delegated to the underlying parameters dictionary.
    return self.__P.__contains__(key)
1692 # ==============================================================================
# NOTE(review): elided excerpt — constructor header, several else-branches and
# method headers are missing from this view; visible code kept byte-identical.
# Wraps either a single vector (numpy column) or a series of vectors
# (Persistence.OneVector) under a common shape/size interface.
1693 class State(object):
# English: general state-type interface class.
1695 Classe générale d'interface de type état
1698 name = "GenericVector",
1700 asPersistentVector = None,
1706 toBeChecked = False,
# Original French docstring (kept verbatim): describes the asVector /
# asPersistentVector / asScript / asDataFile input alternatives.
1709 Permet de définir un vecteur :
1710 - asVector : entrée des données, comme un vecteur compatible avec le
1711 constructeur de numpy.matrix, ou "True" si entrée par script.
1712 - asPersistentVector : entrée des données, comme une série de vecteurs
1713 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1714 type Persistence, ou "True" si entrée par script.
1715 - asScript : si un script valide est donné contenant une variable
1716 nommée "name", la variable est de type "asVector" (par défaut) ou
1717 "asPersistentVector" selon que l'une de ces variables est placée à
1719 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1720 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1721 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1722 nommée "name"), on récupère les colonnes et on les range ligne après
1723 ligne (colMajor=False, par défaut) ou colonne après colonne
1724 (colMajor=True). La variable résultante est de type "asVector" (par
1725 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1728 self.__name = str(name)
1729 self.__check = bool(toBeChecked)
1733 self.__is_vector = False
1734 self.__is_series = False
# Input resolution: script import, then data file, then direct values.
1736 if asScript is not None:
1737 __Vector, __Series = None, None
1738 if asPersistentVector:
1739 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1741 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1742 elif asDataFile is not None:
1743 __Vector, __Series = None, None
1744 if asPersistentVector:
1745 if colNames is not None:
1746 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1748 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, hence the format-dependent transpose.
1749 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1750 __Series = numpy.transpose(__Series)
1751 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1752 __Series = numpy.transpose(__Series)
1754 if colNames is not None:
1755 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1757 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1759 __Vector = numpy.ravel(__Vector, order = "F")
1761 __Vector = numpy.ravel(__Vector, order = "C")
1763 __Vector, __Series = asVector, asPersistentVector
# Single vector: stored as a float column array of shape (n,1).
1765 if __Vector is not None:
1766 self.__is_vector = True
1767 if isinstance(__Vector, str):
1768 __Vector = PlatformInfo.strvect2liststr( __Vector )
1769 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1770 self.shape = self.__V.shape
1771 self.size = self.__V.size
# Series of vectors: stored one by one in a Persistence.OneVector.
1772 elif __Series is not None:
1773 self.__is_series = True
1774 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1775 self.__V = Persistence.OneVector(self.__name)
1776 if isinstance(__Series, str):
1777 __Series = PlatformInfo.strmatrix2liststr(__Series)
1778 for member in __Series:
1779 if isinstance(member, str):
1780 member = PlatformInfo.strvect2liststr( member )
1781 self.__V.store(numpy.asarray( member, dtype=float ))
# Persistence objects may expose shape as attribute or as a method.
1784 if isinstance(self.__V.shape, (tuple, list)):
1785 self.shape = self.__V.shape
1787 self.shape = self.__V.shape()
1788 if len(self.shape) == 1:
1789 self.shape = (self.shape[0],1)
1790 self.size = self.shape[0] * self.shape[1]
1792 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
# Optional time scheduling attached to the state.
1794 if scheduledBy is not None:
1795 self.__T = scheduledBy
# Accessor: returns value alone or (value, scheduler) — branch lines elided.
1797 def getO(self, withScheduler=False):
1799 return self.__V, self.__T
1800 elif self.__T is None:
# Internal type predicates (method headers elided in this view).
1806 "Vérification du type interne"
1807 return self.__is_vector
1810 "Vérification du type interne"
1811 return self.__is_series
1814 "x.__repr__() <==> repr(x)"
1815 return repr(self.__V)
1818 "x.__str__() <==> str(x)"
1819 return str(self.__V)
1821 # ==============================================================================
# NOTE(review): elided excerpt — constructor header and several branch lines
# are missing from this view; visible code kept byte-identical.
# Represents a covariance under one of four storages: full matrix, diagonal
# vector, scalar multiple of identity, or an arbitrary duck-typed object.
1822 class Covariance(object):
# English: general covariance-type interface class.
1824 Classe générale d'interface de type covariance
1827 name = "GenericCovariance",
1828 asCovariance = None,
1829 asEyeByScalar = None,
1830 asEyeByVector = None,
1833 toBeChecked = False,
# Original French docstring (kept verbatim): describes the four input forms.
1836 Permet de définir une covariance :
1837 - asCovariance : entrée des données, comme une matrice compatible avec
1838 le constructeur de numpy.matrix
1839 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1840 multiplicatif d'une matrice de corrélation identité, aucune matrice
1841 n'étant donc explicitement à donner
1842 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1843 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1844 n'étant donc explicitement à donner
1845 - asCovObject : entrée des données comme un objet python, qui a les
1846 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1847 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1848 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1849 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1850 pleine doit être vérifié
1852 self.__name = str(name)
1853 self.__check = bool(toBeChecked)
# Exactly one of the four storage flags ends up True after construction.
1856 self.__is_scalar = False
1857 self.__is_vector = False
1858 self.__is_matrix = False
1859 self.__is_object = False
# Script input: the value is fetched by name (branch selectors elided here).
1861 if asScript is not None:
1862 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1864 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1866 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1868 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1870 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1872 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar storage: self.__C is a non-negative float (abs() enforces sign).
1874 if __Scalar is not None:
1875 if isinstance(__Scalar, str):
1876 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1877 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1878 if numpy.array(__Scalar).size != 1:
1879 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1880 self.__is_scalar = True
1881 self.__C = numpy.abs( float(__Scalar) )
# Vector storage: self.__C holds the (non-negative) diagonal only.
1884 elif __Vector is not None:
1885 if isinstance(__Vector, str):
1886 __Vector = PlatformInfo.strvect2liststr( __Vector )
1887 self.__is_vector = True
1888 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1889 self.shape = (self.__C.size,self.__C.size)
1890 self.size = self.__C.size**2
# Full-matrix storage (NOTE(review): numpy.matrix is deprecated upstream).
1891 elif __Matrix is not None:
1892 self.__is_matrix = True
1893 self.__C = numpy.matrix( __Matrix, float )
1894 self.shape = self.__C.shape
1895 self.size = self.__C.size
# Object storage: duck-typing checked against the mandatory method list.
1896 elif __Object is not None:
1897 self.__is_object = True
1899 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1900 if not hasattr(self.__C,at):
1901 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1902 if hasattr(self.__C,"shape"):
1903 self.shape = self.__C.shape
1906 if hasattr(self.__C,"size"):
1907 self.size = self.__C.size
# Commented-out historical error (kept verbatim).
1912 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# NOTE(review): elided excerpt — try/except lines of the SPD checks and the
# predicate method headers are missing from this view; kept byte-identical.
# Validates squareness and positive-definiteness of the stored covariance.
1916 def __validate(self):
1918 if self.__C is None:
1919 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1920 if self.ismatrix() and min(self.shape) != max(self.shape):
1921 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1922 if self.isobject() and min(self.shape) != max(self.shape):
1923 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1924 if self.isscalar() and self.__C <= 0:
1925 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1926 if self.isvector() and (self.__C <= 0).any():
1927 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check via Cholesky, run when asked or when logging is verbose.
1928 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1930 numpy.linalg.cholesky( self.__C )
1932 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1933 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1937 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Storage-kind predicates (method headers elided in this view).
1940 "Vérification du type interne"
1941 return self.__is_scalar
1944 "Vérification du type interne"
1945 return self.__is_vector
1948 "Vérification du type interne"
1949 return self.__is_matrix
1952 "Vérification du type interne"
1953 return self.__is_object
# NOTE(review): elided excerpt — method headers (getI, getT, cholesky, sqrtm,
# sqrtmI, __repr__, __str__) and several if/else lines are missing from this
# view; visible code kept byte-identical. Each accessor returns a NEW
# Covariance built in the same storage kind as self, or delegates to the
# wrapped object when one is stored.
# getI: inverse covariance (diagonal/scalar kinds invert elementwise).
1958 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1959 elif self.isvector():
1960 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1961 elif self.isscalar():
1962 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1963 elif self.isobject() and hasattr(self.__C,"getI"):
1964 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1966 return None # Indispensable
# getT: transpose (a no-op for diagonal/scalar storages).
1971 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1972 elif self.isvector():
1973 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1974 elif self.isscalar():
1975 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1976 elif self.isobject() and hasattr(self.__C,"getT"):
1977 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1979 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# cholesky: lower factor; elementwise sqrt for diagonal/scalar storages.
1982 "Décomposition de Cholesky"
1984 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1985 elif self.isvector():
1986 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1987 elif self.isscalar():
1988 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1989 elif self.isobject() and hasattr(self.__C,"cholesky"):
1990 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1992 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# choleskyI: inverse of the Cholesky factor.
1994 def choleskyI(self):
1995 "Inversion de la décomposition de Cholesky"
1997 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1998 elif self.isvector():
1999 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2000 elif self.isscalar():
2001 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2002 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2003 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2005 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# sqrtm: matrix square root (real part of scipy.linalg.sqrtm for full form).
2008 "Racine carrée matricielle"
2011 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2012 elif self.isvector():
2013 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2014 elif self.isscalar():
2015 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2016 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2017 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2019 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# sqrtmI: inverse of the matrix square root.
2022 "Inversion de la racine carrée matricielle"
2025 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2026 elif self.isvector():
2027 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2028 elif self.isscalar():
2029 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2030 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2031 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2033 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag: diagonal as a 1-D array; scalar form needs an explicit size msize.
2035 def diag(self, msize=None):
2036 "Diagonale de la matrice"
2038 return numpy.diag(self.__C)
2039 elif self.isvector():
2041 elif self.isscalar():
2043 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2045 return self.__C * numpy.ones(int(msize))
2046 elif self.isobject() and hasattr(self.__C,"diag"):
2047 return self.__C.diag()
2049 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# trace: sum of diagonal; scalar form again needs msize.
2051 def trace(self, msize=None):
2052 "Trace de la matrice"
2054 return numpy.trace(self.__C)
2055 elif self.isvector():
2056 return float(numpy.sum(self.__C))
2057 elif self.isscalar():
2059 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2061 return self.__C * int(msize)
2062 elif self.isobject():
2063 return self.__C.trace()
2065 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# asfullmatrix: densify to a float ndarray, whatever the storage kind.
2067 def asfullmatrix(self, msize=None):
2070 return numpy.asarray(self.__C, dtype=float)
2071 elif self.isvector():
2072 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2073 elif self.isscalar():
2075 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2077 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2078 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2079 return self.__C.asfullmatrix()
2081 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
# assparsematrix body is elided from this view.
2083 def assparsematrix(self):
2091 "x.__repr__() <==> repr(x)"
2092 return repr(self.__C)
2095 "x.__str__() <==> str(x)"
2096 return str(self.__C)
# __add__: adds self's diagonal contribution onto *other* in place for
# vector/scalar storages (note the in-place += mutates an ndarray operand).
2098 def __add__(self, other):
2099 "x.__add__(y) <==> x+y"
2100 if self.ismatrix() or self.isobject():
2101 return self.__C + numpy.asmatrix(other)
2102 elif self.isvector() or self.isscalar():
2103 _A = numpy.asarray(other)
2104 if len(_A.shape) == 1:
2105 _A.reshape((-1,1))[::2] += self.__C
2107 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2108 return numpy.asmatrix(_A)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Reflected addition is deliberately unsupported for covariance objects.
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    # Full matrix or duck-typed object: plain matrix subtraction.
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    # Diagonal (vector) or scalar storage: rewrite other's diagonal entries
    # in place as (self - diag(other)); off-diagonal entries are untouched.
    if self.isvector() or self.isscalar():
        work = numpy.asarray(other)
        flat = work.reshape(work.size)
        step = work.shape[1] + 1      # diagonal stride of a square 2-D array
        flat[::step] = self.__C - flat[::step]
        return numpy.asmatrix(work)
    # NOTE(review): unlike __add__, a 1-D `other` raises IndexError on
    # shape[1] here — TODO confirm whether a 1-D branch was intended.
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Reflected subtraction is deliberately unsupported for covariance objects.
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
# NOTE(review): elided excerpt — the __neg__/__len__ headers and at least one
# else-branch line per operator are missing from this view; visible code kept
# byte-identical. Operators dispatch on the storage kind (matrix / vector /
# scalar / object) and distinguish vector vs matrix operands by size/shape.
2128 "x.__neg__() <==> -x"
# __matmul__: matrix product returning plain ndarrays (not numpy.matrix).
2131 def __matmul__(self, other):
2132 "x.__mul__(y) <==> x@y"
2133 if self.ismatrix() and isinstance(other, (int, float)):
2134 return numpy.asarray(self.__C) * other
2135 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2136 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2137 return numpy.ravel(self.__C @ numpy.ravel(other))
2138 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2139 return numpy.asarray(self.__C) @ numpy.asarray(other)
2141 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2142 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2143 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2144 return numpy.ravel(self.__C) * numpy.ravel(other)
2145 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2146 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2148 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2149 elif self.isscalar() and isinstance(other,numpy.matrix):
2150 return numpy.asarray(self.__C * other)
2151 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2152 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2153 return self.__C * numpy.ravel(other)
2155 return self.__C * numpy.asarray(other)
2156 elif self.isobject():
2157 return self.__C.__matmul__(other)
2159 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# __mul__: legacy numpy.matrix product (returns matrix objects).
2161 def __mul__(self, other):
2162 "x.__mul__(y) <==> x*y"
2163 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2164 return self.__C * other
2165 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2166 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2167 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2168 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2169 return self.__C * numpy.asmatrix(other)
2171 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2172 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2173 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2174 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2175 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2176 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2178 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2179 elif self.isscalar() and isinstance(other,numpy.matrix):
2180 return self.__C * other
2181 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2182 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2183 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2185 return self.__C * numpy.asmatrix(other)
2186 elif self.isobject():
2187 return self.__C.__mul__(other)
2189 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# __rmatmul__: reflected product, other @ self.
2191 def __rmatmul__(self, other):
2192 "x.__rmul__(y) <==> y@x"
2193 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2194 return other * self.__C
2195 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2196 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2197 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2198 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2199 return numpy.asmatrix(other) * self.__C
2201 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2202 elif self.isvector() and isinstance(other,numpy.matrix):
2203 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2204 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2205 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2206 return numpy.asmatrix(numpy.array(other) * self.__C)
2208 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2209 elif self.isscalar() and isinstance(other,numpy.matrix):
2210 return other * self.__C
2211 elif self.isobject():
2212 return self.__C.__rmatmul__(other)
2214 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__: reflected legacy product, other * self.
2216 def __rmul__(self, other):
2217 "x.__rmul__(y) <==> y*x"
2218 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2219 return other * self.__C
2220 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2221 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2222 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2223 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2224 return numpy.asmatrix(other) * self.__C
2226 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2227 elif self.isvector() and isinstance(other,numpy.matrix):
2228 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2229 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2230 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2231 return numpy.asmatrix(numpy.array(other) * self.__C)
2233 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2234 elif self.isscalar() and isinstance(other,numpy.matrix):
2235 return other * self.__C
2236 elif self.isscalar() and isinstance(other,float):
2237 return other * self.__C
2238 elif self.isobject():
2239 return self.__C.__rmul__(other)
2241 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__: the covariance's leading dimension.
2244 "x.__len__() <==> len(x)"
2245 return self.shape[0]
2247 # ==============================================================================
# NOTE(review): elided excerpt — the func() body and getfunc() header are
# missing from this view; visible code kept byte-identical.
# Builds an observer callable from its source text (presumably exec'd in
# func() — the executing line is elided here; TODO confirm).
2248 class Observer2Func(object):
# English: creation of an observer function from its text.
2250 Création d'une fonction d'observateur a partir de son texte
2252 def __init__(self, corps=""):
2253 self.__corps = corps
2254 def func(self,var,info):
# English docstring: observation function.
2255 "Fonction d'observation"
# English docstring: returns the function pointer held by the object.
2258 "Restitution du pointeur de fonction dans l'objet"
2261 # ==============================================================================
# NOTE(review): elided excerpt — dict literal openers for self.__viewers /
# self.__loaders and some elif/else lines are missing from this view;
# visible code kept byte-identical.
# Records the creation commands of a case (self.__logSerie) and replays them
# through pluggable viewer (dump) and loader (load) formatters.
2262 class CaseLogger(object):
# English: preservation of the commands used to create a case.
2264 Conservation des commandes de création d'un cas
2266 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2267 self.__name = str(__name)
2268 self.__objname = str(__objname)
2269 self.__logSerie = []
2270 self.__switchoff = False
# Built-in output formatters (the "self.__viewers = {" line is elided).
2272 "TUI" :Interfaces._TUIViewer,
2273 "SCD" :Interfaces._SCDViewer,
2274 "YACS":Interfaces._YACSViewer,
2275 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2276 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2277 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formatters (the "self.__loaders = {" line is elided).
2280 "TUI" :Interfaces._TUIViewer,
2281 "COM" :Interfaces._COMViewer,
# Callers may extend both registries.
2283 if __addViewers is not None:
2284 self.__viewers.update(dict(__addViewers))
2285 if __addLoaders is not None:
2286 self.__loaders.update(dict(__addLoaders))
# Append one command record; __switchoff gates recording (toggle lines elided).
2288 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2289 "Enregistrement d'une commande individuelle"
2290 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2291 if "self" in __keys: __keys.remove("self")
2292 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2294 self.__switchoff = True
2296 self.__switchoff = False
# Serialize the recorded commands through the requested viewer.
2298 def dump(self, __filename=None, __format="TUI", __upa=""):
2299 "Restitution normalisée des commandes"
2300 if __format in self.__viewers:
2301 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2303 raise ValueError("Dumping as \"%s\" is not available"%__format)
2304 return __formater.dump(__filename, __upa)
# Reload commands through the requested loader.
2306 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2307 "Chargement normalisé des commandes"
2308 if __format in self.__loaders:
2309 __formater = self.__loaders[__format]()
2311 raise ValueError("Loading as \"%s\" is not available"%__format)
2312 return __formater.load(__filename, __content, __object)
2314 # ==============================================================================
# NOTE(review): elided excerpt — the function header (def MultiFonction) and
# several control lines are missing from this view; visible code kept
# byte-identical. Maps _sFunction over an input series, optionally in
# parallel via multiprocessing.Pool.
2317 _extraArguments = None,
2318 _sFunction = lambda x: x,
# English: for an ordered list of input vectors, returns the corresponding
# list of values of the function given in argument.
2323 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2324 correspondante de valeurs de la fonction en argument
2326 # Vérifications et définitions initiales
2327 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2328 if not PlatformInfo.isIterable( __xserie ):
# NOTE(review): "unkown" typo in this runtime message (also below); cannot be
# safely fixed here because the enclosing function is only partially visible.
2329 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker-count normalization (default branch lines elided).
2331 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2334 __mpWorkers = int(_mpWorkers)
2336 import multiprocessing
# Parallel path: pool.map over the prepared job list.
2347 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2348 with multiprocessing.Pool(__mpWorkers) as pool:
2349 __multiHX = pool.map( _sFunction, _jobs )
2352 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential path: extra arguments forwarded positionally or as keywords.
2354 # logging.debug("MULTF Internal monoprocessing calculation begin")
2356 if _extraArguments is None:
2357 for __xvalue in __xserie:
2358 __multiHX.append( _sFunction( __xvalue ) )
2359 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2360 for __xvalue in __xserie:
2361 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2362 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2363 for __xvalue in __xserie:
2364 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2366 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2367 # logging.debug("MULTF Internal monoprocessing calculation end")
2369 # logging.debug("MULTF Internal multifonction calculations end")
2372 # ==============================================================================
2373 if __name__ == "__main__":
2374 print('\n AUTODIAGNOSTIC\n')