1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
# NOTE(review): this listing carries baked-in original line numbers and its
# numbering is not contiguous — several source lines (docstring delimiters,
# method headers, branch bodies, closing brackets) appear to be elided.
# Comments below describe only what the visible lines demonstrate; confirm
# against the upstream file before acting on them.
41 class CacheManager(object):
# Purpose (from visible code): bounded cache of operator evaluations keyed on
# the input vector, with a norm-relative redundancy tolerance.
43 Classe générale de gestion d'un cache de calculs
# Visible __slots__ entries for the attributes assigned below.
46 "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
# Constructor keyword defaults (the def header itself is elided):
# tolerance 1e-18; length -1 means "auto-size later in storeValueInX".
51 toleranceInRedundancy = 1.e-18,
52 lengthOfRedundancy = -1,
55 Les caractéristiques de tolérance peuvent être modifiées à la création.
57 self.__tolerBP = float(toleranceInRedundancy)
58 self.__lengthOR = int(lengthOfRedundancy)
59 self.__initlnOR = self.__lengthOR
# wasCalculatedIn: scan the cache (newest first) for a stored (x, H(x)) pair
# matching xValue under the same operator name.
69 def wasCalculatedIn(self, xValue, oName="" ):
70 "Check for the existence of a cached computation matching the value"
# Walk backwards over at most the __lengthOR most recent entries.
74 for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
75 if not hasattr(xValue, 'size'):
# NOTE(review): the branch bodies between these elif tests (presumably
# continue/break statements) are elided from this listing.
77 elif (str(oName) != self.__listOPCV[i][3]):
79 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component screen before the full norm comparison below.
81 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: distance below tolerance relative to the stored norm = cache hit.
83 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
85 __HxV = self.__listOPCV[i][1]
# storeValueInX: append (x, H(x), ||x||, operator name) and trim to length.
89 def storeValueInX(self, xValue, HxValue, oName="" ):
90 "Store, for an operator o, a computation Hx matching the value x"
# First call: auto-size the cache from the state dimension (capped at 50).
91 if self.__lengthOR < 0:
92 self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
93 self.__initlnOR = self.__lengthOR
94 self.__seenNames.append(str(oName))
95 if str(oName) not in self.__seenNames: # Extend the list if new
96 self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
97 self.__initlnOR += self.__lengthOR
98 self.__seenNames.append(str(oName))
# Evict oldest entries beyond the configured length (FIFO).
99 while len(self.__listOPCV) > self.__lengthOR:
100 self.__listOPCV.pop(0)
101 self.__listOPCV.append( (
102 copy.copy(numpy.ravel(xValue)), # 0 Previous point
103 copy.copy(HxValue), # 1 Previous value
104 numpy.linalg.norm(xValue), # 2 Norm
105 str(oName), # 3 Operator name
# NOTE(review): the lines closing this tuple/call and the disable()/enable()
# method headers are elided; the four lines below appear to be their bodies.
110 self.__initlnOR = self.__lengthOR
112 self.__enabled = False
116 self.__lengthOR = self.__initlnOR
117 self.__enabled = True
119 # ==============================================================================
# NOTE(review): baked-in line numbers with elided lines throughout this class
# (docstring delimiters, the __init__ header, else/return branches).
# Comments describe only what the visible lines demonstrate.
120 class Operator(object):
122 Classe générale d'interface de type opérateur simple
# Visible __slots__ entries for the instance attributes used below.
125 "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
126 "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
127 "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
# Constructor keyword defaults (the def __init__ header itself is elided).
136 name = "GenericOperator",
139 avoidingRedundancy = True,
140 reducingMemoryUse = False,
141 inputAsMultiFunction = False,
142 enableMultiProcess = False,
143 extraArguments = None,
146 On construit un objet de ce type en fournissant, à l'aide de l'un des
147 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
150 - name : nom d'opérateur
151 - fromMethod : argument de type fonction Python
152 - fromMatrix : argument adapté au constructeur numpy.array/matrix
153 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
154 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
156 - inputAsMultiFunction : booléen indiquant une fonction explicitement
157 définie (ou pas) en multi-fonction
158 - extraArguments : arguments supplémentaires passés à la fonction de
159 base et ses dérivées (tuple ou dictionnaire)
# Normalise and store configuration; reset the three evaluation counters.
161 self.__name = str(name)
162 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
163 self.__reduceM = bool( reducingMemoryUse )
164 self.__avoidRC = bool( avoidingRedundancy )
165 self.__inputAsMF = bool( inputAsMultiFunction )
166 self.__mpEnabled = bool( enableMultiProcess )
167 self.__extraArgs = extraArguments
# Backend selection: native multi-function, wrapped mono-function, or matrix.
168 if fromMethod is not None and self.__inputAsMF:
169 self.__Method = fromMethod # logtimer(fromMethod)
171 self.__Type = "Method"
172 elif fromMethod is not None and not self.__inputAsMF:
# Mono-function wrapped into a multi-function; MultiFonction is defined
# elsewhere in this module (not visible in this listing).
173 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
175 self.__Type = "Method"
176 elif fromMatrix is not None:
# A string matrix specification is parsed before the numpy conversion.
178 if isinstance(fromMatrix, str):
179 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
180 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
181 self.__Type = "Matrix"
# Cache control; Operator.CM is a class-level CacheManager whose definition
# is elided from this listing.
187 def disableAvoidingRedundancy(self):
189 Operator.CM.disable()
191 def enableAvoidingRedundancy(self):
# NOTE(review): only a disable() call is visible in this method — the
# enable() branch of its if/else appears to be elided; confirm upstream.
196 Operator.CM.disable()
# appliedTo: apply the operator to one state or a series of states, honouring
# precomputed values (HValue) and the redundancy cache.
202 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
204 Permet de restituer le résultat de l'application de l'opérateur à une
205 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
206 argument devant a priori être du bon type.
208 - les arguments par série sont :
209 - xValue : argument adapté pour appliquer l'opérateur
210 - HValue : valeur précalculée de l'opérateur en ce point
211 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
218 if HValue is not None:
222 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
# With precomputed values: record them in the cache instead of evaluating.
224 if _HValue is not None:
225 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
227 for i in range(len(_HValue)):
228 _HxValue.append( _HValue[i] )
230 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise: try the cache, then the matrix product, else defer to a batched
# function call collected in _xserie.
235 for i, xv in enumerate(_xValue):
237 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
239 __alreadyCalculated = False
241 if __alreadyCalculated:
242 self.__addOneCacheCall()
245 if self.__Matrix is not None:
246 self.__addOneMatrixCall()
247 _hv = self.__Matrix @ numpy.ravel(xv)
249 self.__addOneMethodCall()
253 _HxValue.append( _hv )
# Batched multi-function evaluation of the points not resolved above.
255 if len(_xserie)>0 and self.__Matrix is None:
256 if self.__extraArgs is None:
257 _hserie = self.__Method( _xserie ) # MF computation
259 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
# Sanity check: a multi-function must return a sequence of results.
260 if not hasattr(_hserie, "pop"):
262 "The user input multi-function doesn't seem to return a"+\
263 " result sequence, behaving like a mono-function. It has"+\
271 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally stack the series of results as columns of one 2D array.
273 if returnSerieAsArrayMatrix:
274 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
276 if argsAsSerie: return _HxValue
277 else: return _HxValue[-1]
# appliedControledFormTo: apply the operator to (x, u) pairs; when u is None
# the operator is applied to x alone.
279 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
281 Permet de restituer le résultat de l'application de l'opérateur à des
282 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
283 argument devant a priori être du bon type. Si la uValue est None,
284 on suppose que l'opérateur ne s'applique qu'à xValue.
286 - paires : les arguments par paire sont :
287 - xValue : argument X adapté pour appliquer l'opérateur
288 - uValue : argument U adapté pour appliquer l'opérateur
289 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
291 if argsAsSerie: _xuValue = paires
292 else: _xuValue = (paires,)
293 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control u is ignored, only x is multiplied.
295 if self.__Matrix is not None:
297 for paire in _xuValue:
298 _xValue, _uValue = paire
299 self.__addOneMatrixCall()
300 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: pass (x, u) pairs when u is set, bare x otherwise.
303 for paire in _xuValue:
304 _xValue, _uValue = paire
305 if _uValue is not None:
306 _xuArgs.append( paire )
308 _xuArgs.append( _xValue )
309 self.__addOneMethodCall( len(_xuArgs) )
310 if self.__extraArgs is None:
311 _HxValue = self.__Method( _xuArgs ) # MF computation
313 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
315 if returnSerieAsArrayMatrix:
316 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
318 if argsAsSerie: return _HxValue
319 else: return _HxValue[-1]
# appliedInXTo: apply the operator, taken at the nominal point xNominal, to
# xValue; for the matrix (linear) form the nominal point is irrelevant.
321 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
323 Permet de restituer le résultat de l'application de l'opérateur à une
324 série d'arguments xValue, sachant que l'opérateur est valable en
325 xNominal. Cette méthode se contente d'appliquer, son argument devant a
326 priori être du bon type. Si l'opérateur est linéaire car c'est une
327 matrice, alors il est valable en tout point nominal et xNominal peut
328 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
329 permet d'indiquer que l'argument est multi-paires.
331 - paires : les arguments par paire sont :
332 - xNominal : série d'arguments permettant de donner le point où
333 l'opérateur est construit pour être ensuite appliqué
334 - xValue : série d'arguments adaptés pour appliquer l'opérateur
335 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
337 if argsAsSerie: _nxValue = paires
338 else: _nxValue = (paires,)
339 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear operator: multiply each xValue, ignoring xNominal.
341 if self.__Matrix is not None:
343 for paire in _nxValue:
344 _xNominal, _xValue = paire
345 self.__addOneMatrixCall()
346 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: the (xNominal, xValue) pairs are passed through as-is.
348 self.__addOneMethodCall( len(_nxValue) )
349 if self.__extraArgs is None:
350 _HxValue = self.__Method( _nxValue ) # MF computation
352 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
354 if returnSerieAsArrayMatrix:
355 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
357 if argsAsSerie: return _HxValue
358 else: return _HxValue[-1]
# asMatrix: return the operator as a matrix; a function-form operator is
# evaluated at the supplied operating point(s).
360 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
362 Permet de renvoyer l'opérateur sous la forme d'une matrice
364 if self.__Matrix is not None:
365 self.__addOneMatrixCall()
366 mValue = [self.__Matrix,]
# The string sentinel "UnknownVoidValue" means "no operating point given".
367 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
370 self.__addOneMethodCall( len(ValueForMethodForm) )
371 for _vfmf in ValueForMethodForm:
372 mValue.append( self.__Method(((_vfmf, None),)) )
374 self.__addOneMethodCall()
375 mValue = self.__Method(((ValueForMethodForm, None),))
377 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
379 if argsAsSerie: return mValue
380 else: return mValue[-1]
# shape accessor body (its def header is elided): only the matrix form has one.
384 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
385 la forme d'une matrice
387 if self.__Matrix is not None:
388 return self.__Matrix.shape
390 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: return the local and global evaluation counters, either the full
# tuple (which=None) or one indexed item.
392 def nbcalls(self, which=None):
394 Renvoie les nombres d'évaluations de l'opérateur
397 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
398 self.__NbCallsAsMatrix,
399 self.__NbCallsAsMethod,
400 self.__NbCallsOfCached,
401 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
402 Operator.NbCallsAsMatrix,
403 Operator.NbCallsAsMethod,
404 Operator.NbCallsOfCached,
406 if which is None: return __nbcalls
407 else: return __nbcalls[which]
def __addOneMatrixCall(self):
    "Count one evaluation performed through the matrix form."
    # Instance-level tally, then the class-wide (global) tally.
    self.__NbCallsAsMatrix += 1
    Operator.NbCallsAsMatrix += 1
def __addOneMethodCall(self, nb = 1):
    "Count nb evaluations performed through the function form."
    # Instance-level tally, then the class-wide (global) tally.
    self.__NbCallsAsMethod += nb
    Operator.NbCallsAsMethod += nb
def __addOneCacheCall(self):
    "Count one evaluation avoided thanks to the cache."
    # Instance-level tally, then the class-wide (global) tally.
    self.__NbCallsOfCached = self.__NbCallsOfCached + 1
    Operator.NbCallsOfCached = Operator.NbCallsOfCached + 1
424 # ==============================================================================
# NOTE(review): baked-in line numbers with elided lines throughout this class
# (the __init__ header, several else branches and raise headers are missing).
# Comments describe only what the visible lines demonstrate.
425 class FullOperator(object):
427 Classe générale d'interface de type opérateur complet
428 (Direct, Linéaire Tangent, Adjoint)
# Visible __slots__ entries.
431 "__name", "__check", "__extraArgs", "__FO", "__T",
# Constructor keyword defaults (the def __init__ header itself is elided).
435 name = "GenericFullOperator",
437 asOneFunction = None, # 1 Function
438 asThreeFunctions = None, # 3 Functions in a dictionary
439 asScript = None, # 1 or 3 Function(s) by script
440 asDict = None, # Parameters
442 extraArguments = None,
443 performancePrf = None,
444 inputAsMF = False,# Function(s) as Multi-Functions
449 self.__name = str(name)
450 self.__check = bool(toBeChecked)
451 self.__extraArgs = extraArguments
# Merge user parameters and normalise the multiprocessing switches.
456 if (asDict is not None) and isinstance(asDict, dict):
457 __Parameters.update( asDict )
458 # EnableMultiProcessingInDerivatives=True takes priority
459 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
460 __Parameters["EnableMultiProcessingInDerivatives"] = True
461 __Parameters["EnableMultiProcessingInEvaluation"] = False
462 if "EnableMultiProcessingInDerivatives" not in __Parameters:
463 __Parameters["EnableMultiProcessingInDerivatives"] = False
# Derivative and evaluation multiprocessing are mutually exclusive here.
464 if __Parameters["EnableMultiProcessingInDerivatives"]:
465 __Parameters["EnableMultiProcessingInEvaluation"] = False
466 if "EnableMultiProcessingInEvaluation" not in __Parameters:
467 __Parameters["EnableMultiProcessingInEvaluation"] = False
468 if "withIncrement" in __Parameters: # Temporary
469 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
470 # The default is equivalent to "ReducedOverallRequirements"
471 __reduceM, __avoidRC = True, True
# Performance profile selects memory-reduction vs redundancy-cache trade-off.
472 if performancePrf is not None:
473 if performancePrf == "ReducedAmountOfCalculation":
474 __reduceM, __avoidRC = False, True
475 elif performancePrf == "ReducedMemoryFootprint":
476 __reduceM, __avoidRC = True, False
477 elif performancePrf == "NoSavings":
478 __reduceM, __avoidRC = False, False
# Script-based definition: import a matrix or the named operator functions
# from the user script (the branch conditions between these lines are elided).
480 if asScript is not None:
481 __Matrix, __Function = None, None
483 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
485 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
486 __Function.update({"useApproximatedDerivatives":True})
487 __Function.update(__Parameters)
488 elif asThreeFunctions:
490 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
491 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
492 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
494 __Function.update(__Parameters)
# Direct (in-memory) definition: one function, or the Tangent/Adjoint trio.
497 if asOneFunction is not None:
498 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
499 if asOneFunction["Direct"] is not None:
500 __Function = asOneFunction
# NOTE(review): "dictionnary" typo lives inside a runtime error string —
# left untouched here; fix upstream if desired.
502 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
504 __Function = { "Direct":asOneFunction }
505 __Function.update({"useApproximatedDerivatives":True})
506 __Function.update(__Parameters)
507 elif asThreeFunctions is not None:
# Full trio given (and approximated derivatives not requested).
508 if isinstance(asThreeFunctions, dict) and \
509 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
510 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
511 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
512 __Function = asThreeFunctions
# Only Direct given: fall back to approximated derivatives.
513 elif isinstance(asThreeFunctions, dict) and \
514 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
515 __Function = asThreeFunctions
516 __Function.update({"useApproximatedDerivatives":True})
519 "The functions has to be given in a dictionnary which have either"+\
520 " 1 key (\"Direct\") or"+\
521 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct defaults to the Tangent function.
522 if "Direct" not in asThreeFunctions:
523 __Function["Direct"] = asThreeFunctions["Tangent"]
524 __Function.update(__Parameters)
# Normalise the appliedInX argument to a dict keyed by "HXb".
528 if appliedInX is not None and isinstance(appliedInX, dict):
529 __appliedInX = appliedInX
530 elif appliedInX is not None:
531 __appliedInX = {"HXb":appliedInX}
535 if scheduledBy is not None:
536 self.__T = scheduledBy
# Case 1: Direct function with approximated (finite-difference) derivatives.
538 if isinstance(__Function, dict) and \
539 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
540 ("Direct" in __Function) and (__Function["Direct"] is not None):
# Fill in every FD-approximation option not supplied by the user.
541 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
542 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
543 if "withdX" not in __Function: __Function["withdX"] = None
544 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
545 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
546 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
547 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
548 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
549 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import: avoids a circular dependency at module load time (daCore
# package member), presumably — confirm.
550 from daCore import NumericObjects
551 FDA = NumericObjects.FDApproximation(
553 Function = __Function["Direct"],
554 centeredDF = __Function["CenteredFiniteDifference"],
555 increment = __Function["DifferentialIncrement"],
556 dX = __Function["withdX"],
557 extraArguments = self.__extraArgs,
558 reducingMemoryUse = __Function["withReducingMemoryUse"],
559 avoidingRedundancy = __Function["withAvoidingRedundancy"],
560 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
561 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
562 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
563 mpWorkers = __Function["NumberOfProcesses"],
564 mfEnabled = __Function["withmfEnabled"],
# Wrap the three FD operators as Operator objects.
566 self.__FO["Direct"] = Operator(
568 fromMethod = FDA.DirectOperator,
569 reducingMemoryUse = __reduceM,
570 avoidingRedundancy = __avoidRC,
571 inputAsMultiFunction = inputAsMF,
572 extraArguments = self.__extraArgs,
573 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
574 self.__FO["Tangent"] = Operator(
575 name = self.__name+"Tangent",
576 fromMethod = FDA.TangentOperator,
577 reducingMemoryUse = __reduceM,
578 avoidingRedundancy = __avoidRC,
579 inputAsMultiFunction = inputAsMF,
580 extraArguments = self.__extraArgs )
581 self.__FO["Adjoint"] = Operator(
582 name = self.__name+"Adjoint",
583 fromMethod = FDA.AdjointOperator,
584 reducingMemoryUse = __reduceM,
585 avoidingRedundancy = __avoidRC,
586 inputAsMultiFunction = inputAsMF,
587 extraArguments = self.__extraArgs )
# Case 2: explicit Direct/Tangent/Adjoint functions supplied by the user.
588 elif isinstance(__Function, dict) and \
589 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
590 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
591 self.__FO["Direct"] = Operator(
593 fromMethod = __Function["Direct"],
594 reducingMemoryUse = __reduceM,
595 avoidingRedundancy = __avoidRC,
596 inputAsMultiFunction = inputAsMF,
597 extraArguments = self.__extraArgs,
598 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
599 self.__FO["Tangent"] = Operator(
600 name = self.__name+"Tangent",
601 fromMethod = __Function["Tangent"],
602 reducingMemoryUse = __reduceM,
603 avoidingRedundancy = __avoidRC,
604 inputAsMultiFunction = inputAsMF,
605 extraArguments = self.__extraArgs )
606 self.__FO["Adjoint"] = Operator(
607 name = self.__name+"Adjoint",
608 fromMethod = __Function["Adjoint"],
609 reducingMemoryUse = __reduceM,
610 avoidingRedundancy = __avoidRC,
611 inputAsMultiFunction = inputAsMF,
612 extraArguments = self.__extraArgs )
# Case 3: linear operator given as a matrix; Adjoint is the transpose.
613 elif asMatrix is not None:
614 if isinstance(__Matrix, str):
615 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
616 __matrice = numpy.asarray( __Matrix, dtype=float )
617 self.__FO["Direct"] = Operator(
619 fromMatrix = __matrice,
620 reducingMemoryUse = __reduceM,
621 avoidingRedundancy = __avoidRC,
622 inputAsMultiFunction = inputAsMF,
623 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
624 self.__FO["Tangent"] = Operator(
625 name = self.__name+"Tangent",
626 fromMatrix = __matrice,
627 reducingMemoryUse = __reduceM,
628 avoidingRedundancy = __avoidRC,
629 inputAsMultiFunction = inputAsMF )
630 self.__FO["Adjoint"] = Operator(
631 name = self.__name+"Adjoint",
632 fromMatrix = __matrice.T,
633 reducingMemoryUse = __reduceM,
634 avoidingRedundancy = __avoidRC,
635 inputAsMultiFunction = inputAsMF )
# No usable definition: error message (the raise header line is elided).
639 "The %s object is improperly defined or undefined,"%self.__name+\
640 " it requires at minima either a matrix, a Direct operator for"+\
641 " approximate derivatives or a Tangent/Adjoint operators pair."+\
642 " Please check your operator input.")
# Store the (possibly string-specified) appliedInX vectors as column vectors.
644 if __appliedInX is not None:
645 self.__FO["AppliedInX"] = {}
646 for key in __appliedInX:
647 if isinstance(__appliedInX[key], str):
648 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
649 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
651 self.__FO["AppliedInX"] = None
# Delegate textual representations to the internal operator dictionary.
657 "x.__repr__() <==> repr(x)"
658 return repr(self.__FO)
661 "x.__str__() <==> str(x)"
662 return str(self.__FO)
664 # ==============================================================================
# NOTE(review): baked-in line numbers with elided lines (docstring delimiters
# and some statements are missing). Comments describe only visible lines.
# Only the class header and __init__ are visible here; _pre_run continues
# beyond this span.
665 class Algorithm(object):
667 Classe générale d'interface de type algorithme
669 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
670 d'assimilation, en fournissant un container (dictionnaire) de variables
671 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
673 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# Visible __slots__ entries.
676 "_name", "_parameters", "__internal_state", "__required_parameters",
677 "_m", "__variable_names_not_public", "__canonical_parameter_name",
678 "__canonical_stored_name", "__replace_by_the_new_name",
# __init__: build the generic dictionary of persistent storage variables
# (Persistence objects) available to every concrete algorithm.
682 def __init__(self, name):
684 L'initialisation présente permet de fabriquer des variables de stockage
685 disponibles de manière générique dans les algorithmes élémentaires. Ces
686 variables de stockage sont ensuite conservées dans un dictionnaire
687 interne à l'objet, mais auquel on accède par la méthode "get".
689 Les variables prévues sont :
690 - APosterioriCorrelations : matrice de corrélations de la matrice A
691 - APosterioriCovariance : matrice de covariances a posteriori : A
692 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
693 - APosterioriVariances : vecteur des variances de la matrice A
694 - Analysis : vecteur d'analyse : Xa
695 - BMA : Background moins Analysis : Xa - Xb
696 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
697 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
698 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
699 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
700 - CostFunctionJo : partie observations de la fonction-coût : Jo
701 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
702 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
703 - CurrentOptimum : état optimal courant lors d'itérations
704 - CurrentState : état courant lors d'itérations
705 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
706 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
707 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
708 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
709 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
710 - ForecastState : état prédit courant lors d'itérations
711 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
712 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
713 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
714 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
715 - Innovation : l'innovation : d = Y - H(X)
716 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
717 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
718 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
719 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
720 - KalmanGainAtOptimum : gain de Kalman à l'optimum
721 - MahalanobisConsistency : indicateur de consistance des covariances
722 - OMA : Observation moins Analyse : Y - Xa
723 - OMB : Observation moins Background : Y - Xb
724 - Residu : dans le cas des algorithmes de vérification
725 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
726 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
727 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
728 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
729 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
730 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
731 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
732 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
733 On peut rajouter des variables à stocker dans l'initialisation de
734 l'algorithme élémentaire qui va hériter de cette classe
# Basic bookkeeping: logger, memory monitor, name, parameter containers.
736 logging.debug("%s Initialisation", str(name))
737 self._m = PlatformInfo.SystemUsage()
739 self._name = str( name )
740 self._parameters = {"StoreSupplementaryCalculations":[]}
741 self.__internal_state = {}
742 self.__required_parameters = {}
743 self.__required_inputs = {
744 "RequiredInputValues":{"mandatory":(), "optional":()},
745 "ClassificationTags":[],
747 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
748 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
749 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
750 self.__replace_by_the_new_name = {} # New name from an old name
# Registry of every storable variable, each backed by a Persistence object
# of the matching kind (OneScalar / OneVector / OneMatrix / OneIndex).
752 self.StoredVariables = {}
753 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
754 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
755 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
756 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
757 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
758 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
759 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
760 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
761 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
762 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
763 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
764 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
765 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
766 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
767 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
768 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
769 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
770 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
771 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
772 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
773 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
774 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
775 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
776 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
777 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
778 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
779 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
780 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
781 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
782 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
783 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
784 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
785 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
786 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
787 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
788 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
789 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
790 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
791 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
792 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
793 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
794 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
795 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
796 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
797 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
798 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
799 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
800 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
801 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
802 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup tables for stored-variable and
# parameter names.
804 for k in self.StoredVariables:
805 self.__canonical_stored_name[k.lower()] = k
807 for k, v in self.__variable_names_not_public.items():
808 self.__canonical_parameter_name[k.lower()] = k
809 self.__canonical_parameter_name["algorithm"] = "Algorithm"
810 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
    "Pre-computation: update internal parameters, check inputs, start timers."
    logging.debug("%s Lancement", self._name)
    logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
    self._getTimeState(reset=True)
    #
    # Update the internal parameters with the content of Parameters, taking
    # back the default values for all the ones that are not defined
    self.__setParameters(Parameters, reset=True)
    for k, v in self.__variable_names_not_public.items():
        if k not in self._parameters:
            self.__setParameters( {k:v} )
    #
    # Checks and complements for the input vectors
    def __test_vvalue(argument, variable, argname, symbol=None):
        "Log or fail according to the required/optional status of a vector."
        if symbol is None:
            symbol = variable
        if argument is None:
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
            else:
                logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
        else:
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
            else:
                logging.debug(
                    "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
                    self._name,argname,symbol,numpy.array(argument).size))
        return 0
    __test_vvalue( Xb, "Xb", "Background or initial state" )
    __test_vvalue( Y, "Y", "Observation" )
    __test_vvalue( U, "U", "Control" )
    #
    # Checks and complements for the covariances
    def __test_cvalue(argument, variable, argname, symbol=None):
        "Log or fail according to the required/optional status of a covariance."
        if symbol is None:
            symbol = variable
        if argument is None:
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
            else:
                logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
        else:
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
            else:
                logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        return 0
    __test_cvalue( B, "B", "Background" )
    __test_cvalue( R, "R", "Observation" )
    __test_cvalue( Q, "Q", "Evolution" )
    #
    # Checks and complements for the operators
    def __test_ovalue(argument, variable, argname, symbol=None):
        "Log or fail according to the required/optional status of an operator."
        if symbol is None:
            symbol = variable
        if argument is None or (isinstance(argument,dict) and len(argument)==0):
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
            else:
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
        else:
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
            else:
                logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        return 0
    __test_ovalue( HO, "HO", "Observation", "H" )
    __test_ovalue( EM, "EM", "Evolution", "M" )
    __test_ovalue( CM, "CM", "Control Model", "C" )
    #
    # Checks and complements for the bounds
    if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
        logging.debug("%s Bounds taken into account"%(self._name,))
    else:
        self._parameters["Bounds"] = None
    if ("StateBoundsForQuantiles" in self._parameters) \
        and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
        and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
        logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
        # Note: unlike Bounds, no default to None here, otherwise one could
        # not run without bounds at all
    #
    # Checks and complements for the initialization point X
    if "InitializationPoint" in self._parameters:
        if Xb is not None:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                # Obtained by typecast: numpy.ravel(self._parameters["InitializationPoint"])
            else:
                self._parameters["InitializationPoint"] = numpy.ravel(Xb)
        else:
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")
    #
    # Workaround for a TNC bug on the return of the Minimum
    if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
        self.setParameterValue("StoreInternalVariables",True)
    #
    # Verbosity and logging
    if logging.getLogger().level < logging.WARNING:
        self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
        self._parameters["optmessages"] = 15
    else:
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        self._parameters["optmessages"] = 0
    #
    return 0
def _post_run(self,_oH=None):
    "Post-computation: derive a posteriori quantities and log run statistics."
    if ("StoreSupplementaryCalculations" in self._parameters) and \
        "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
        for _A in self.StoredVariables["APosterioriCovariance"]:
            if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
            if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
            if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                # Normalize the covariance by the inverse standard deviations
                # to obtain the correlation matrix
                _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                self.StoredVariables["APosterioriCorrelations"].store( _C )
    if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
        logging.debug(
            "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug(
            "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
    logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
    logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
    logging.debug("%s Terminé", self._name)
    return 0
def _toStore(self, key):
    "True if in StoreSupplementaryCalculations, else False"
    return key in self._parameters["StoreSupplementaryCalculations"]
def get(self, key=None):
    """
    Return one of the stored variables identified by the given key, or the
    dictionary of all available variables when no key is given. The objects
    themselves are returned, so individual access methods are the ones of
    the persistence classes.
    """
    if key is not None:
        # Key lookup is case-insensitive through the canonical-name table
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    else:
        return self.StoredVariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    if key is None or key.lower() not in self.__canonical_stored_name:
        return False
    else:
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
def keys(self):
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    else:
        # No storage yet (e.g. during unpickling): behave as an empty mapping
        return []
def pop(self, k, d):
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
    else:
        # If the key is not found, d is returned, matching the documented
        # dict.pop-with-default contract. The previous version built the
        # KeyError message through a convoluted try/except whose TypeError
        # branch was unreachable for a string key.
        return d
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
    """
    Must implement the elementary algorithmic calculation; to be overridden
    by every concrete algorithm.
    """
    raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
def defineRequiredParameter(self,
        name     = None,
        default  = None,
        typecast = None,
        message  = None,
        minval   = None,
        maxval   = None,
        listval  = None,
        listadv  = None,
        oldname  = None,
    ):
    """
    Define a required parameter of the algorithm together with its default
    characteristics (default value, typecast, bounds, allowed values,
    message, deprecated alias).
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "listadv"  : listadv,
        "message"  : message,
        "oldname"  : oldname,
    }
    self.__canonical_parameter_name[name.lower()] = name
    if oldname is not None:
        # Deprecated alias: keep resolving it, but remember the replacement
        self.__canonical_parameter_name[oldname.lower()] = name  # Conversion
        self.__replace_by_the_new_name[oldname.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of required parameter names, or the full
    dictionary of required parameters when details are asked for.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: apply the
    typecast, then check bounds and allowed-value lists.
    """
    __k = self.__canonical_parameter_name[name.lower()]
    default  = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval   = self.__required_parameters[__k]["minval"]
    maxval   = self.__required_parameters[__k]["maxval"]
    listval  = self.__required_parameters[__k]["listval"]
    listadv  = self.__required_parameters[__k]["listadv"]
    #
    # Choose the effective value: explicit value wins over the default
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None:
            __val = default
        else:
            __val = typecast( default )
    else:
        if typecast is None:
            __val = value
        else:
            try:
                __val = typecast( value )
            except Exception:
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    #
    # Bounds checks (element-wise, so sequences are also supported)
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    # Allowed-values checks: listval is public, listadv holds advanced values
    if listval is not None or listadv is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if listval is not None and v in listval:
                    continue
                elif listadv is not None and v in listadv:
                    continue
                else:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    #
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Declare the calculation input arguments that are mandatory or optional
    for this algorithm.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
def getInputArguments(self):
    """
    Return the (mandatory, optional) tuples of required input argument names.
    """
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tags to the algorithm attributes.
    Return the current list in all cases.
    """
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
def __setParameters(self, fromDico={}, reset=False):
    """
    Store the received parameters in the internal dictionary, resolving
    case-insensitive and deprecated names, and warn about deprecated ones.
    """
    self._parameters.update( fromDico )
    # Map canonical parameter names back to the keys actually given
    __inverse_fromDico_keys = {}
    for k in fromDico.keys():
        if k.lower() in self.__canonical_parameter_name:
            __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
    # __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
    __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
    #
    # Warn once per deprecated (old) name used by the caller
    for k in __inverse_fromDico_keys.values():
        if k.lower() in self.__replace_by_the_new_name:
            __newk = self.__replace_by_the_new_name[k.lower()]
            __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
            __msg += " Please update your code."
            warnings.warn(__msg, FutureWarning, stacklevel=50)
    #
    for k in self.__required_parameters.keys():
        if k in __canonic_fromDico_keys:
            self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
        elif reset:
            # On reset, re-install the default value for undefined parameters
            self._parameters[k] = self.setParameterValue(k)
        else:
            pass
        if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
            # Avoid dumping very long values into the debug log
            logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
        else:
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
    """
    Store named variables constituting the internal state.
    """
    if reset:  # Empty the dictionary beforehand
        self.__internal_state = {}
    if key is not None and value is not None:
        self.__internal_state[key] = value
    self.__internal_state.update( dict(fromDico) )
def _getInternalState(self, key=None):
    """
    Return the internal state as a dictionary of named variables, or a
    single entry of it when a known key is given.
    """
    if key is not None and key in self.__internal_state:
        return self.__internal_state[key]
    else:
        return self.__internal_state
def _getTimeState(self, reset=False):
    """
    Initialize (reset=True) or return the computing time (cpu, elapsed)
    in seconds since the last reset.
    """
    if reset:
        self.__initial_cpu_time     = time.process_time()
        self.__initial_elapsed_time = time.perf_counter()
        return 0., 0.
    else:
        self.__cpu_time     = time.process_time() - self.__initial_cpu_time
        self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
        return self.__cpu_time, self.__elapsed_time
def _StopOnTimeLimit(self, X=None, withReason=False):
    "Stop criteria on time limit: True/False [+ Reason]"
    c, e = self._getTimeState()
    if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
        __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
    elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
        __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
    else:
        __SC, __SR = False, ""
    if withReason:
        return __SC, __SR
    else:
        return __SC
1191 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class mimicking "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to the ones of the "Algorithm" class.
    """
    __slots__ = (
        "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
    )

    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        # Minimal set of storable variables, as persistence objects
        self.StoredVariables = {}
        self.StoredVariables["Analysis"]               = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"]          = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"]         = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"]         = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"]      = Persistence.OneIndex(name = "CurrentStepNumber")
        #
        # Case-insensitive lookup table for variable names
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one of the stored variables identified by the given key, or
        the dictionary of all available variables when no key is given. The
        objects themselves are returned, so individual access methods are
        the ones of the persistence classes.
        """
        if key is not None:
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        else:
            return self.StoredVariables
1235 # ==============================================================================
1236 class AlgorithmAndParameters(object):
1238 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1241 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1242 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1243 "__Q", "__variable_names_not_public",
def __init__(self,
        name        = "GenericAlgorithm",
        asAlgorithm = None,
        asDict      = None,
        asScript    = None,
    ):
    """
    Initialize the algorithm/parameters wrapper, the algorithm being given
    directly or read from a script, and the parameters as a dict or script.
    """
    self.__name = str(name)
    self.__A    = None
    self.__P    = {}
    #
    self.__algorithm     = {}
    self.__algorithmFile = None
    self.__algorithmName = None
    #
    self.updateParameters( asDict, asScript )
    #
    if asAlgorithm is None and asScript is not None:
        __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
    else:
        __Algo = asAlgorithm
    #
    if __Algo is not None:
        self.__A = str(__Algo)
        self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
    #
    self.__variable_names_not_public = {"nextStep":False}  # Duplication in Algorithm
def updateParameters(self,
        asDict   = None,
        asScript = None,
    ):
    "Update the parameters from a dictionary or from a script file."
    if asDict is None and asScript is not None:
        __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    else:
        __Dict = asDict
    #
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
def executePythonScheme(self, asDictAO = None):
    "Run the data assimilation calculation through the Python scheme."
    Operator.CM.clearCache()
    #
    if not isinstance(asDictAO, dict):
        raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
    # Unwrap each case object: take its .getO() content when available,
    # otherwise use the raw entry
    if hasattr(asDictAO["Background"],"getO"):
        self.__Xb = asDictAO["Background"].getO()
    elif hasattr(asDictAO["CheckingPoint"],"getO"):
        self.__Xb = asDictAO["CheckingPoint"].getO()
    else:
        self.__Xb = None
    if hasattr(asDictAO["Observation"],"getO"):
        self.__Y = asDictAO["Observation"].getO()
    else:
        self.__Y = asDictAO["Observation"]
    if hasattr(asDictAO["ControlInput"],"getO"):
        self.__U = asDictAO["ControlInput"].getO()
    else:
        self.__U = asDictAO["ControlInput"]
    if hasattr(asDictAO["ObservationOperator"],"getO"):
        self.__HO = asDictAO["ObservationOperator"].getO()
    else:
        self.__HO = asDictAO["ObservationOperator"]
    if hasattr(asDictAO["EvolutionModel"],"getO"):
        self.__EM = asDictAO["EvolutionModel"].getO()
    else:
        self.__EM = asDictAO["EvolutionModel"]
    if hasattr(asDictAO["ControlModel"],"getO"):
        self.__CM = asDictAO["ControlModel"].getO()
    else:
        self.__CM = asDictAO["ControlModel"]
    self.__B = asDictAO["BackgroundError"]
    self.__R = asDictAO["ObservationError"]
    self.__Q = asDictAO["EvolutionError"]
    #
    self.__shape_validate()
    #
    self.__algorithm.run(
        Xb         = self.__Xb,
        Y          = self.__Y,
        U          = self.__U,
        HO         = self.__HO,
        EM         = self.__EM,
        CM         = self.__CM,
        R          = self.__R,
        B          = self.__B,
        Q          = self.__Q,
        Parameters = self.__P,
        )
def executeYACSScheme(self, FileName=None):
    "Run the data assimilation calculation through a YACS scheme."
    if FileName is None or not os.path.exists(FileName):
        raise ValueError("a YACS file name has to be given for YACS execution.\n")
    else:
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
    if not PlatformInfo.has_salome or \
        not PlatformInfo.has_yacs or \
        not PlatformInfo.has_adao:
        raise ImportError("\n\n"+\
            "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
            "Please load the right environnement before trying to use it.\n")
    #
    # SALOME/YACS modules are only importable inside a SALOME session,
    # hence the local imports
    import pilot
    import SALOMERuntime
    import loader
    SALOMERuntime.RuntimeSALOME_setRuntime()
    #
    r = pilot.getRuntime()
    xmlLoader = loader.YACSLoader()
    xmlLoader.registerProcCataLoader()
    try:
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
    except Exception:
        # Best effort: a missing embedded catalog is not fatal
        pass
    #
    try:
        p = xmlLoader.load(__file)
    except IOError as ex:
        print("The YACS XML schema file can not be loaded: %s"%(ex,))
    #
    logger = p.getLogger("parser")
    if not logger.isEmpty():
        print("The imported YACS XML schema has errors on parsing:")
        print(logger.getStr())
    #
    if not p.isValid():
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
    #
    info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
    p.checkConsistency(info)
    if info.areWarningsOrErrors():
        print("The YACS XML schema is not coherent and will not be executed:")
        print(info.getGlobalRepr())
    #
    e = pilot.ExecutorSwig()
    e.RunW(p)
    if p.getEffectiveState() != pilot.DONE:
        print(p.getErrorReport())
    #
    return 0
def get(self, key = None):
    """
    Return a stored algorithm variable or a parameter identified by key,
    or, without key, the dictionary of public parameters.
    """
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
    else:
        # Work on a copy: the previous code aliased the internal parameter
        # dictionary, so the pop() calls below silently and permanently
        # removed the private entries from self.__P.
        allvariables = dict(self.__P)
        for k in self.__variable_names_not_public:
            allvariables.pop(k, None)
        return allvariables
def pop(self, k, d):
    "Required for pickling"
    return self.__algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of parameters required by the algorithm"
    return self.__algorithm.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    "Return the list of inputs required by the algorithm"
    return self.__algorithm.getInputArguments()
def getAlgorithmAttributes(self):
    "Return the list of attributes of the algorithm"
    # Calling setAttributes with no tags only returns the current list
    return self.__algorithm.setAttributes()
def setObserver(self, __V, __O, __I, __S):
    "Attach the observer function __O, with info __I and scheduler __S, to variable __V."
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be build before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
    else:
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            Scheduler      = __S,
            HookFunction   = __O,
            HookParameters = __I,
            )
def removeObserver(self, __V, __O, __A = False):
    "Remove the observer __O (or all of them if __A) from variable __V."
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be removed before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
    else:
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            HookFunction     = __O,
            AllDataObservers = __A,
            )
def hasObserver(self, __V):
    "True if the variable named __V exists and has at least one observer."
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        return False
    if __V not in self.__algorithm:
        return False
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
def keys(self):
    "D.keys() -> list of D's keys, without the non-public variable names"
    __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
    for k in self.__variable_names_not_public:
        if k in __allvariables:
            __allvariables.remove(k)
    return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    return key in self.__algorithm or key in self.__P
def __repr__(self):
    "x.__repr__() <==> repr(x)"
    return repr(self.__A)+", "+repr(self.__P)
def __str__(self):
    "x.__str__() <==> str(x)"
    return str(self.__A)+", "+str(self.__P)
def __setAlgorithm(self, choice = None ):
    """
    Select the algorithm to use to carry out the assimilation study. The
    argument is a character string referring to the name of an algorithm
    performing the operation on the fixed arguments.
    """
    if choice is None:
        raise ValueError("Error: algorithm choice has to be given")
    if self.__algorithmName is not None:
        raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
    daDirectory = "daAlgorithms"
    #
    # Explicitly look for the complete module file in the search path
    # ---------------------------------------------------------------
    module_path = None
    for directory in sys.path:
        if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
            module_path = os.path.abspath(os.path.join(directory, daDirectory))
    if module_path is None:
        raise ImportError(
            "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
    #
    # Import the complete file as a module
    # ------------------------------------
    try:
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
    except ImportError as e:
        raise ImportError(
            "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
    #
    # Instantiate an object of the elementary type of the file
    # --------------------------------------------------------
    self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    return 0
1505 def __shape_validate(self):
1507 Validation de la correspondance correcte des tailles des variables et
1508 des matrices s'il y en a.
1510 if self.__Xb is None: __Xb_shape = (0,)
1511 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1512 elif hasattr(self.__Xb,"shape"):
1513 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1514 else: __Xb_shape = self.__Xb.shape()
1515 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1517 if self.__Y is None: __Y_shape = (0,)
1518 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1519 elif hasattr(self.__Y,"shape"):
1520 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1521 else: __Y_shape = self.__Y.shape()
1522 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1524 if self.__U is None: __U_shape = (0,)
1525 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1526 elif hasattr(self.__U,"shape"):
1527 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1528 else: __U_shape = self.__U.shape()
1529 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1531 if self.__B is None: __B_shape = (0,0)
1532 elif hasattr(self.__B,"shape"):
1533 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1534 else: __B_shape = self.__B.shape()
1535 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1537 if self.__R is None: __R_shape = (0,0)
1538 elif hasattr(self.__R,"shape"):
1539 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1540 else: __R_shape = self.__R.shape()
1541 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1543 if self.__Q is None: __Q_shape = (0,0)
1544 elif hasattr(self.__Q,"shape"):
1545 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1546 else: __Q_shape = self.__Q.shape()
1547 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1549 if len(self.__HO) == 0: __HO_shape = (0,0)
1550 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1551 elif hasattr(self.__HO["Direct"],"shape"):
1552 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1553 else: __HO_shape = self.__HO["Direct"].shape()
1554 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1556 if len(self.__EM) == 0: __EM_shape = (0,0)
1557 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1558 elif hasattr(self.__EM["Direct"],"shape"):
1559 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1560 else: __EM_shape = self.__EM["Direct"].shape()
1561 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1563 if len(self.__CM) == 0: __CM_shape = (0,0)
1564 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1565 elif hasattr(self.__CM["Direct"],"shape"):
1566 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1567 else: __CM_shape = self.__CM["Direct"].shape()
1568 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1570 # Vérification des conditions
1571 # ---------------------------
1572 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1573 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1574 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1575 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1577 if not( min(__B_shape) == max(__B_shape) ):
1578 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1579 if not( min(__R_shape) == max(__R_shape) ):
1580 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1581 if not( min(__Q_shape) == max(__Q_shape) ):
1582 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1583 if not( min(__EM_shape) == max(__EM_shape) ):
1584 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1586 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1588 "Shape characteristic of observation operator (H)"+\
1589 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1590 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1592 "Shape characteristic of observation operator (H)"+\
1593 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1594 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1596 "Shape characteristic of observation operator (H)"+\
1597 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1598 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1600 "Shape characteristic of observation operator (H)"+\
1601 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1603 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1604 if self.__algorithmName in ["EnsembleBlue",]:
1605 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1606 self.__Xb = Persistence.OneVector("Background")
1607 for member in asPersistentVector:
1608 self.__Xb.store( numpy.asarray(member, dtype=float) )
1609 __Xb_shape = min(__B_shape)
1612 "Shape characteristic of a priori errors covariance matrix (B)"+\
1613 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1615 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1617 "Shape characteristic of observation errors covariance matrix (R)"+\
1618 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1620 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1622 "Shape characteristic of evolution model (EM)"+\
1623 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1625 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1627 "Shape characteristic of control model (CM)"+\
1628 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1630 if ("Bounds" in self.__P) \
1631 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1632 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1633 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1634 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1636 if ("StateBoundsForQuantiles" in self.__P) \
1637 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1638 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1639 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1640 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1644 # ==============================================================================
class RegulationAndParameters(object):
    """
    Generic interface class for regulation actions and their parameters.

    Parameters may be given directly as a dict (``asDict``) and/or loaded
    from a user script (``asScript``); the algorithm name is stored under
    the "Algorithm" key.
    """
    __slots__ = ("__name", "__P")

    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None ):
        """
        Build the regulation container, reading the algorithm and the
        parameters either from the arguments or from the given script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        # NOTE(review): structural lines were missing in the reviewed chunk;
        # the else-branches below restore the standard direct-argument path.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Return the value for a known key, otherwise the whole parameter dict.
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1685 # ==============================================================================
class DataObserver(object):
    """
    Generic observer interface class.

    Attaches an observer function (given as an object, a template, a string
    or a script) to one or several named variables of an algorithm object.
    """
    __slots__ = ("__name", "__V", "__O", "__I")

    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None ):
        """
        Register the observer on each requested variable of ``withAlgo``.
        ``onVariable`` is a variable name or a list/tuple of variable names.
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # Without explicit info, each variable carries its own name
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build the observer callable from its textual definition
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1750 # ==============================================================================
class UserScript(object):
    """
    Generic interface class for a user script text.

    The stored text comes, in priority order, from a direct string, a named
    template (for "UserPostAnalysis" or "Observer" usage), or a script file.
    """
    __slots__ = ("__name", "__F")

    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None ):
        """
        Store the script text according to the first applicable source.
        """
        self.__name = str(name)
        self.__F    = None
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            # No source given: keep an empty script text
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1786 # ==============================================================================
class ExternalParameters(object):
    """
    Generic interface class for external parameter storage.

    Behaves as a small dict-like container filled from a dict and/or from a
    user script exposing an "ExternalParameters" variable.
    """
    __slots__ = ("__name", "__P")

    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None ):
        """
        Build the container and load the initial parameters.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                         asDict   = None,
                         asScript = None ):
        "Mise à jour des paramètres"
        # Script input is only used when no direct dict is given
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        # Return the value for a known key, otherwise the list of stored keys
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        # Stored parameter names
        return list(self.__P.keys())

    def pop(self, k, d):
        # Remove and return key k, or default d when absent
        return self.__P.pop(k, d)

    def items(self):
        # (key, value) view of the stored parameters
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1837 # ==============================================================================
class State(object):
    """
    Generic interface class for a state object: a single vector or a series
    of vectors (Persistence-like object).
    """
    __slots__ = (
        "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
        "shape", "size",
    )

    def __init__(self,
                 name               = "GenericVector",
                 asVector           = None,
                 asPersistentVector = None,
                 asScript           = None,
                 asDataFile         = None,
                 colNames           = None,
                 colMajor           = False,
                 scheduledBy        = None,
                 toBeChecked        = False ):
        """
        Define a vector-like state:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" if given through a script.
        - asPersistentVector : data input, as a series of vectors compatible
          with the numpy.matrix constructor, or as a Persistence object, or
          "True" if given through a script.
        - asScript : if a valid script containing a variable named "name" is
          given, the variable is read as "asVector" (default) or as
          "asPersistentVector" depending on which of these flags is set.
        - asDataFile : if one or more valid files with values in columns
          (named through "colNames", otherwise a column named "name" is
          looked for) are given, the columns are stored row after row
          (colMajor=False, default) or column after column (colMajor=True).
          The resulting variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these flags is set.
        """
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__V         = None
        self.__T         = None
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # NOTE(review): npz storage appears already transposed with
                # respect to text files, hence the symmetric conditions below
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            else:
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                if bool(colMajor):
                    __Vector = numpy.ravel(__Vector, order = "F")
                else:
                    __Vector = numpy.ravel(__Vector, order = "C")
        else:
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            # Stored as a float column vector
            self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name)
                if isinstance(__Series, str):
                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                for member in __Series:
                    if isinstance(member, str):
                        member = PlatformInfo.strvect2liststr( member )
                    self.__V.store(numpy.asarray( member, dtype=float ))
            else:
                # Already a Persistence-like object
                self.__V = __Series
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            else:
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        else:
            raise ValueError(
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a vector, a list/tuple of"+\
                " vectors or a persistent object. Please check your vector input.")
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        "Return the stored object, optionally with its scheduler"
        if withScheduler:
            return self.__V, self.__T
        elif self.__T is None:
            return self.__V
        else:
            return self.__V

    def isvector(self):
        "Vérification du type interne"
        return self.__is_vector

    def isseries(self):
        "Vérification du type interne"
        return self.__is_series

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)
1974 # ==============================================================================
class Covariance(object):
    """
    Generic interface class for covariance matrices, stored either as a full
    matrix, a diagonal variance vector, a scalar multiplier of identity, or
    a user-provided covariance object.
    """
    __slots__ = (
        "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
        "__is_object", "shape", "size",
    )

    def __init__(self,
                 name          = "GenericCovariance",
                 asCovariance  = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 asCovObject   = None,
                 asScript      = None,
                 toBeChecked   = False ):
        """
        Define a covariance in one of four mutually exclusive ways:
        - asCovariance : data input as a full matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar multiplying
          an implicit identity correlation matrix (no explicit matrix given)
        - asEyeByVector : data input as a single variance vector to be put on
          the diagonal of a correlation matrix (no explicit matrix given)
        - asCovObject : data input as a python object, which must provide the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and may provide
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked : boolean requiring an SPD check of the full matrix
        """
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__C         = None
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            if asEyeByScalar:
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asEyeByVector:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asCovObject:
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        else:
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            if isinstance(__Scalar, str):
                __Scalar = PlatformInfo.strvect2liststr( __Scalar )
                if len(__Scalar) > 0: __Scalar = __Scalar[0]
            if numpy.array(__Scalar).size != 1:
                raise ValueError(
                    " The diagonal multiplier given to define a sparse matrix is"+\
                    " not a unique scalar value.\n Its actual measured size is"+\
                    " %i. Please check your scalar input."%numpy.array(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # No explicit storage: shape/size are conventionally zero
            self.shape = (0,0)
            self.size = 0
        elif __Vector is not None:
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__is_vector = True
            self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            self.__C = __Object
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            else:
                self.shape = (0,0)
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            else:
                self.size = 0
        else:
            pass
        #
        self.__validate()

    def __validate(self):
        "Validate the stored covariance definition"
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # The costly SPD check only runs on request or in verbose logging mode
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            try:
                numpy.linalg.cholesky( self.__C )
            except Exception:
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            try:
                self.__C.cholesky()
            except Exception:
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

    def isscalar(self):
        "Vérification du type interne"
        return self.__is_scalar

    def isvector(self):
        "Vérification du type interne"
        return self.__is_vector

    def ismatrix(self):
        "Vérification du type interne"
        return self.__is_matrix

    def isobject(self):
        "Vérification du type interne"
        return self.__is_object

    def getI(self):
        "Inverse of the covariance"
        if self.ismatrix():
            return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject() and hasattr(self.__C,"getI"):
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        else:
            return None # Indispensable

    def getT(self):
        "Transpose of the covariance"
        if self.ismatrix():
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject() and hasattr(self.__C,"getT"):
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
        else:
            raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))

    def cholesky(self):
        "Cholesky decomposition"
        if self.ismatrix():
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
        else:
            raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        if self.ismatrix():
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
        else:
            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))

    def sqrtm(self):
        "Matrix square root"
        if self.ismatrix():
            import scipy.linalg
            return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtm"):
            return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
        else:
            raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))

    def sqrtmI(self):
        "Inverse of the matrix square root"
        if self.ismatrix():
            import scipy.linalg
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtmI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
        else:
            raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))

    def diag(self, msize=None):
        "Diagonal of the matrix"
        if self.ismatrix():
            return numpy.diag(self.__C)
        elif self.isvector():
            return self.__C
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return self.__C * numpy.ones(int(msize))
        elif self.isobject() and hasattr(self.__C,"diag"):
            return self.__C.diag()
        else:
            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))

    def trace(self, msize=None):
        "Trace of the matrix"
        if self.ismatrix():
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        else:
            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))

    def asfullmatrix(self, msize=None):
        "Full matrix value of the stored covariance"
        if self.ismatrix():
            return numpy.asarray(self.__C, dtype=float)
        elif self.isvector():
            return numpy.asarray( numpy.diag(self.__C), dtype=float )
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
        else:
            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))

    def assparsematrix(self):
        "Sparse (compact) value of the stored covariance"
        # NOTE(review): the internal compact storage is returned as-is
        return self.__C

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__C)

    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            if len(_A.shape) == 1:
                _A.reshape((-1,1))[::2] += self.__C
            else:
                _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)

    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))

    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)

    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))

    def __neg__(self):
        "x.__neg__() <==> -x"
        return - self.__C

    def __matmul__(self, other):
        "x.__matmul__(y) <==> x@y"
        if self.ismatrix() and isinstance(other, (int, float)):
            return numpy.asarray(self.__C) * other
        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.ravel(self.__C @ numpy.ravel(other))
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asarray(self.__C) @ numpy.asarray(other)
            else:
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.ravel(self.__C) * numpy.ravel(other)
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
            else:
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return numpy.asarray(self.__C * other)
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.ravel(other)
            else:
                return self.__C * numpy.asarray(other)
        elif self.isobject():
            return self.__C.__matmul__(other)
        else:
            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return self.__C * numpy.asmatrix(other)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            else:
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmatmul__(self, other):
        "x.__rmatmul__(y) <==> y@x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asmatrix(other) * self.__C
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
                return numpy.asmatrix(numpy.array(other) * self.__C)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmatmul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
                return numpy.asmatrix(other) * self.__C
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vecteur
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
                return numpy.asmatrix(numpy.array(other) * self.__C)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isscalar() and isinstance(other,float):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))

    def __len__(self):
        "x.__len__() <==> len(x)"
        return self.shape[0]
2416 # ==============================================================================
class Observer2Func(object):
    """
    Build an observer function from its source text.
    """
    # One-element tuple: the original bare string form ("__corps") created
    # the same single slot, but the tuple spelling is the conventional one.
    __slots__ = ("__corps",)

    def __init__(self, corps=""):
        # Source text executed at each observation call
        self.__corps = corps

    def func(self, var, info):
        "Fonction d'observation"
        # The user text can use the local names "var" and "info"
        exec(self.__corps)

    def getfunc(self):
        "Restitution du pointeur de fonction dans l'objet"
        return self.func
2432 # ==============================================================================
class CaseLogger(object):
    """
    Keep the record of the commands used to build a case, and allow dumping
    or loading it through pluggable viewers and loaders.
    """
    __slots__ = (
        "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
        "__loaders",
    )

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []
        self.__switchoff = False
        # Output formatters, keyed by format name
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
        }
        # Input formatters, keyed by format name
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Enregistrement d'une commande individuelle"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                # Further registrations are suspended until re-enabled
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Restitution normalisée des commandes"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Chargement normalisé des commandes"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2490 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction      = lambda x: x,
        _mpEnabled      = False,
        _mpWorkers      = None ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the given function evaluated at each of them.

    - __xserie        : iterable of input points
    - _extraArguments : optional extra arguments forwarded to each call,
                        positional (list/tuple/map) or keyword (dict)
    - _sFunction      : the function to evaluate (identity by default)
    - _mpEnabled      : request multiprocessing evaluation when available
    - _mpWorkers      : number of pool workers (None lets the pool decide)
    """
    # Initial checks and definitions
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            # Let multiprocessing choose the pool size
            __mpWorkers = None
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = __xserie
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
2548 # ==============================================================================
# Module self-test entry point: running this file directly only prints a banner.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')