1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
# NOTE(review): this chunk is a mangled extraction -- indentation was lost,
# many lines are elided (the leading numbers are the original file's line
# numbers), and docstring delimiters were dropped. Code kept byte-identical.
41 class CacheManager(object):
43 Classe générale de gestion d'un cache de calculs
# __slots__ fragment: tolerance, current/initial cache length, seen operator
# names, enabled flag. The "__listOPCV" slot is presumably declared on an
# elided line -- TODO confirm against the full source.
46 "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
# Constructor fragment (its "def __init__" line is elided): the redundancy
# tolerance and the cache length can be tuned at creation time; a negative
# length means "auto-size on first store" (see storeValueInX below).
51 toleranceInRedundancy = 1.e-18,
52 lengthOfRedundancy = -1,
55 Les caractéristiques de tolérance peuvent être modifiées à la création.
57 self.__tolerBP = float(toleranceInRedundancy)
58 self.__lengthOR = int(lengthOfRedundancy)
59 self.__initlnOR = self.__lengthOR
# Look up a previously stored evaluation for (xValue, oName), scanning the
# cache from the most recent entry back to the oldest.
69 def wasCalculatedIn(self, xValue, oName="" ):
70 "Vérifie l'existence d'un calcul correspondant à la valeur"
74 for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
# Each cache entry is a tuple (point, value, norm, operator name). The early
# branches below reject non-matching entries; their bodies are on elided
# lines (presumably "pass") -- TODO confirm.
75 if not hasattr(xValue, 'size'):
77 elif (str(oName) != self.__listOPCV[i][3]):
79 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component reject before paying for a full norm; NOTE(review):
# no abs() on the difference here -- confirm intended in the full source.
81 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full redundancy test: the stored point is within tolerance (scaled by the
# stored norm) of xValue, so the cached value can be reused.
83 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
85 __HxV = self.__listOPCV[i][1]
def storeValueInX(self, xValue, HxValue, oName="" ):
    """
    Store, for an operator named oName, a computed value Hx matching the
    point x, so that wasCalculatedIn() can later reuse it.

    Restored from a mangled extraction (stray line-number prefixes, lost
    indentation, unclosed tuple literal); logic kept identical to the
    visible original lines.
    """
    if self.__lengthOR < 0:
        # First store ever: auto-size the cache from the state size
        self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
        self.__initlnOR = self.__lengthOR
        self.__seenNames.append(str(oName))
    if str(oName) not in self.__seenNames:  # Extend the list if the operator is new
        # NOTE(review): "+= self.__lengthOR" adds the already-incremented
        # length to the initial one; kept as in the original -- confirm.
        self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
        self.__initlnOR += self.__lengthOR
        self.__seenNames.append(str(oName))
    while len(self.__listOPCV) > self.__lengthOR:
        # Evict oldest entries (FIFO) when over capacity
        self.__listOPCV.pop(0)
    self.__listOPCV.append( (
        copy.copy(numpy.ravel(xValue)),  # 0 Previous point
        copy.copy(HxValue),              # 1 Previous value
        numpy.linalg.norm(xValue),       # 2 Norm
        str(oName),                      # 3 Operator name
    ) )
# Fragments of the cache disable()/enable() methods (their "def" lines are
# elided): disabling remembers the current length and clears the enabled
# flag; enabling restores the initial length and sets the flag.
110 self.__initlnOR = self.__lengthOR
112 self.__enabled = False
116 self.__lengthOR = self.__initlnOR
117 self.__enabled = True
119 # ==============================================================================
# NOTE(review): mangled extraction -- indentation lost, lines elided, stray
# original line numbers prefixed. Code kept byte-identical below; only
# comments added/translated.
120 class Operator(object):
122 Classe générale d'interface de type opérateur simple
# __slots__ fragment: name, per-instance call counters, behavior flags, the
# callable or matrix backing the operator, and its type tag.
125 "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
126 "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
127 "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
# Constructor fragment (its "def __init__" line and the fromMethod/fromMatrix
# keyword lines are elided): an Operator wraps either a Python (multi-)
# function or a matrix.
136 name = "GenericOperator",
139 avoidingRedundancy = True,
140 reducingMemoryUse = False,
141 inputAsMultiFunction = False,
142 enableMultiProcess = False,
143 extraArguments = None,
146 On construit un objet de ce type en fournissant, à l'aide de l'un des
147 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
150 - name : nom d'opérateur
151 - fromMethod : argument de type fonction Python
152 - fromMatrix : argument adapté au constructeur numpy.array/matrix
153 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
154 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
156 - inputAsMultiFunction : booléen indiquant une fonction explicitement
157 définie (ou pas) en multi-fonction
158 - extraArguments : arguments supplémentaires passés à la fonction de
159 base et ses dérivées (tuple ou dictionnaire)
161 self.__name = str(name)
162 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
163 self.__reduceM = bool( reducingMemoryUse )
164 self.__avoidRC = bool( avoidingRedundancy )
165 self.__inputAsMF = bool( inputAsMultiFunction )
166 self.__mpEnabled = bool( enableMultiProcess )
167 self.__extraArgs = extraArguments
# A user function already declared as a multi-function is used as-is;
# otherwise it is wrapped by MultiFonction (defined elsewhere in the file).
168 if fromMethod is not None and self.__inputAsMF:
169 self.__Method = fromMethod # logtimer(fromMethod)
171 self.__Type = "Method"
172 elif fromMethod is not None and not self.__inputAsMF:
173 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
175 self.__Type = "Method"
176 elif fromMatrix is not None:
# A string matrix specification is parsed before conversion to float array.
178 if isinstance(fromMatrix, str):
179 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
180 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
181 self.__Type = "Matrix"
# Fragments of the redundancy-cache toggles; NOTE(review): only the
# disable() fall-back line of enableAvoidingRedundancy is visible here --
# the enable path (presumably Operator.CM.enable() when __avoidRC is set)
# is on elided lines. Confirm against the full source.
187 def disableAvoidingRedundancy(self):
189 Operator.CM.disable()
191 def enableAvoidingRedundancy(self):
196 Operator.CM.disable()
# Apply the operator to one value or a series of values, reusing cached
# results or precomputed HValue entries when available (fragmentary body).
202 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
204 Permet de restituer le résultat de l'application de l'opérateur à une
205 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
206 argument devant a priori être du bon type.
208 - les arguments par série sont :
209 - xValue : argument adapté pour appliquer l'opérateur
210 - HValue : valeur précalculée de l'opérateur en ce point
211 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch 1: precomputed values supplied -- reuse them (and cache them).
218 if HValue is not None:
222 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
224 if _HValue is not None:
225 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
227 for i in range(len(_HValue)):
228 _HxValue.append( _HValue[i] )
230 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Branch 2: compute each value, first querying the redundancy cache.
235 for i, xv in enumerate(_xValue):
237 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
239 __alreadyCalculated = False
241 if __alreadyCalculated:
242 self.__addOneCacheCall()
245 if self.__Matrix is not None:
246 self.__addOneMatrixCall()
247 _hv = self.__Matrix @ numpy.ravel(xv)
249 self.__addOneMethodCall()
253 _HxValue.append( _hv )
# Remaining points (gathered in _xserie on elided lines) are evaluated in
# one multi-function call.
255 if len(_xserie)>0 and self.__Matrix is None:
256 if self.__extraArgs is None:
257 _hserie = self.__Method( _xserie ) # MF computation
259 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
260 if not hasattr(_hserie, "pop"):
262 "The user input multi-function doesn't seem to return a"+\
263 " result sequence, behaving like a mono-function. It has"+\
271 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack results column-wise into one 2D array.
273 if returnSerieAsArrayMatrix:
274 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
276 if argsAsSerie: return _HxValue
277 else: return _HxValue[-1]
# Apply the operator to (x, u) pairs; when u is None only x is used.
279 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
281 Permet de restituer le résultat de l'application de l'opérateur à des
282 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
283 argument devant a priori être du bon type. Si la uValue est None,
284 on suppose que l'opérateur ne s'applique qu'à xValue.
286 - paires : les arguments par paire sont :
287 - xValue : argument X adapté pour appliquer l'opérateur
288 - uValue : argument U adapté pour appliquer l'opérateur
289 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
291 if argsAsSerie: _xuValue = paires
292 else: _xuValue = (paires,)
293 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: u is ignored, each x is multiplied by the matrix.
295 if self.__Matrix is not None:
297 for paire in _xuValue:
298 _xValue, _uValue = paire
299 self.__addOneMatrixCall()
300 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: keep the pair when u is given, else pass x alone.
303 for paire in _xuValue:
304 _xValue, _uValue = paire
305 if _uValue is not None:
306 _xuArgs.append( paire )
308 _xuArgs.append( _xValue )
309 self.__addOneMethodCall( len(_xuArgs) )
310 if self.__extraArgs is None:
311 _HxValue = self.__Method( _xuArgs ) # MF computation
313 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
315 if returnSerieAsArrayMatrix:
316 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
318 if argsAsSerie: return _HxValue
319 else: return _HxValue[-1]
# Apply the operator built at a nominal point xNominal to a value xValue.
321 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
323 Permet de restituer le résultat de l'application de l'opérateur à une
324 série d'arguments xValue, sachant que l'opérateur est valable en
325 xNominal. Cette méthode se contente d'appliquer, son argument devant a
326 priori être du bon type. Si l'opérateur est linéaire car c'est une
327 matrice, alors il est valable en tout point nominal et xNominal peut
328 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
329 permet d'indiquer que l'argument est multi-paires.
331 - paires : les arguments par paire sont :
332 - xNominal : série d'arguments permettant de donner le point où
333 l'opérateur est construit pour être ensuite appliqué
334 - xValue : série d'arguments adaptés pour appliquer l'opérateur
335 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
337 if argsAsSerie: _nxValue = paires
338 else: _nxValue = (paires,)
339 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear (matrix) case: the nominal point is irrelevant.
341 if self.__Matrix is not None:
343 for paire in _nxValue:
344 _xNominal, _xValue = paire
345 self.__addOneMatrixCall()
346 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
348 self.__addOneMethodCall( len(_nxValue) )
349 if self.__extraArgs is None:
350 _HxValue = self.__Method( _nxValue ) # MF computation
352 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
354 if returnSerieAsArrayMatrix:
355 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
357 if argsAsSerie: return _HxValue
358 else: return _HxValue[-1]
# Return the operator in matrix form; a function-backed operator needs an
# operating point (the sentinel string marks "no point given").
360 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
362 Permet de renvoyer l'opérateur sous la forme d'une matrice
364 if self.__Matrix is not None:
365 self.__addOneMatrixCall()
366 mValue = [self.__Matrix,]
367 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
370 self.__addOneMethodCall( len(ValueForMethodForm) )
371 for _vfmf in ValueForMethodForm:
372 mValue.append( self.__Method(((_vfmf, None),)) )
374 self.__addOneMethodCall()
375 mValue = self.__Method(((ValueForMethodForm, None),))
377 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
379 if argsAsSerie: return mValue
380 else: return mValue[-1]
# shape property fragment (its "def" line is elided): only available when
# the operator is backed by a matrix.
384 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
385 la forme d'une matrice
387 if self.__Matrix is not None:
388 return self.__Matrix.shape
390 raise ValueError("Matrix form of the operator is not available, nor the shape")
def nbcalls(self, which=None):
    """
    Return the operator evaluation counters, as the tuple:
    (0) local matrix+method calls, (1) local matrix calls,
    (2) local method calls, (3) local cache hits,
    (4) global matrix+method calls, (5) global matrix calls,
    (6) global method calls, (7) global cache hits.
    When `which` is given, return only that entry.

    Restored from a mangled extraction (stray line-number prefixes, lost
    indentation, elided docstring quotes and tuple delimiters).
    """
    __nbcalls = (
        self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
        self.__NbCallsAsMatrix,
        self.__NbCallsAsMethod,
        self.__NbCallsOfCached,
        Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
        Operator.NbCallsAsMatrix,
        Operator.NbCallsAsMethod,
        Operator.NbCallsOfCached,
    )
    if which is None: return __nbcalls
    else: return __nbcalls[which]
def __addOneMatrixCall(self):
    "Tally one evaluation of the operator in matrix form"
    self.__NbCallsAsMatrix += 1  # Local count (this instance)
    Operator.NbCallsAsMatrix += 1  # Global count (all Operator instances)
def __addOneMethodCall(self, nb = 1):
    "Tally nb evaluations of the operator in function form"
    self.__NbCallsAsMethod += nb  # Local count (this instance)
    Operator.NbCallsAsMethod += nb  # Global count (all Operator instances)
def __addOneCacheCall(self):
    "Tally one evaluation served from the redundancy cache"
    self.__NbCallsOfCached += 1  # Local count (this instance)
    Operator.NbCallsOfCached += 1  # Global count (all Operator instances)
424 # ==============================================================================
# NOTE(review): mangled extraction -- indentation lost, lines elided, stray
# original line numbers prefixed. Code kept byte-identical below; only
# comments added/translated.
425 class FullOperator(object):
427 Classe générale d'interface de type opérateur complet
428 (Direct, Linéaire Tangent, Adjoint)
431 "__name", "__check", "__extraArgs", "__FO", "__T",
# Constructor fragment (its "def __init__" line and several keyword lines,
# e.g. asMatrix/appliedInX/scheduledBy/toBeChecked, are elided).
435 name = "GenericFullOperator",
437 asOneFunction = None, # 1 Function
438 asThreeFunctions = None, # 3 Functions in a dictionary
439 asScript = None, # 1 or 3 Function(s) by script
440 asDict = None, # Parameters
442 extraArguments = None,
443 performancePrf = None,
444 inputAsMF = False,# Function(s) as Multi-Functions
449 self.__name = str(name)
450 self.__check = bool(toBeChecked)
451 self.__extraArgs = extraArguments
# Normalize the user-supplied parameter dictionary.
456 if (asDict is not None) and isinstance(asDict, dict):
457 __Parameters.update( asDict )
458 # Priority to EnableMultiProcessingInDerivatives=True
459 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
460 __Parameters["EnableMultiProcessingInDerivatives"] = True
461 __Parameters["EnableMultiProcessingInEvaluation"] = False
462 if "EnableMultiProcessingInDerivatives" not in __Parameters:
463 __Parameters["EnableMultiProcessingInDerivatives"] = False
464 if __Parameters["EnableMultiProcessingInDerivatives"]:
465 __Parameters["EnableMultiProcessingInEvaluation"] = False
466 if "EnableMultiProcessingInEvaluation" not in __Parameters:
467 __Parameters["EnableMultiProcessingInEvaluation"] = False
468 if "withIncrement" in __Parameters: # Temporary
469 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
470 # The default is equivalent to "ReducedOverallRequirements"
471 __reduceM, __avoidRC = True, True
472 if performancePrf is not None:
473 if performancePrf == "ReducedAmountOfCalculation":
474 __reduceM, __avoidRC = False, True
475 elif performancePrf == "ReducedMemoryFootprint":
476 __reduceM, __avoidRC = True, False
477 elif performancePrf == "NoSavings":
478 __reduceM, __avoidRC = False, False
# Script-based definition: import the matrix or the function(s) from the
# given script (branch conditions partly elided).
480 if asScript is not None:
481 __Matrix, __Function = None, None
483 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
485 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
486 __Function.update({"useApproximatedDerivatives":True})
487 __Function.update(__Parameters)
488 elif asThreeFunctions:
490 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
491 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
492 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
494 __Function.update(__Parameters)
# Direct in-memory definition: one function (derivatives approximated) ...
497 if asOneFunction is not None:
498 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
499 if asOneFunction["Direct"] is not None:
500 __Function = asOneFunction
502 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
504 __Function = { "Direct":asOneFunction }
505 __Function.update({"useApproximatedDerivatives":True})
506 __Function.update(__Parameters)
# ... or three functions (Direct/Tangent/Adjoint) in a dictionary.
507 elif asThreeFunctions is not None:
508 if isinstance(asThreeFunctions, dict) and \
509 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
510 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
511 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
512 __Function = asThreeFunctions
513 elif isinstance(asThreeFunctions, dict) and \
514 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
515 __Function = asThreeFunctions
516 __Function.update({"useApproximatedDerivatives":True})
519 "The functions has to be given in a dictionnary which have either"+\
520 " 1 key (\"Direct\") or"+\
521 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
522 if "Direct" not in asThreeFunctions:
523 __Function["Direct"] = asThreeFunctions["Tangent"]
524 __Function.update(__Parameters)
# Normalize the optional operating point(s) for linearization.
528 if appliedInX is not None and isinstance(appliedInX, dict):
529 __appliedInX = appliedInX
530 elif appliedInX is not None:
531 __appliedInX = {"HXb":appliedInX}
535 if scheduledBy is not None:
536 self.__T = scheduledBy
# Case 1: direct function with approximated derivatives -> build a finite
# difference approximation (FDA) supplying Tangent and Adjoint.
538 if isinstance(__Function, dict) and \
539 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
540 ("Direct" in __Function) and (__Function["Direct"] is not None):
541 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
542 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
543 if "withdX" not in __Function: __Function["withdX"] = None
544 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
545 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
546 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
547 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
548 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
549 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
550 from daCore import NumericObjects
551 FDA = NumericObjects.FDApproximation(
553 Function = __Function["Direct"],
554 centeredDF = __Function["CenteredFiniteDifference"],
555 increment = __Function["DifferentialIncrement"],
556 dX = __Function["withdX"],
557 extraArguments = self.__extraArgs,
558 reducingMemoryUse = __Function["withReducingMemoryUse"],
559 avoidingRedundancy = __Function["withAvoidingRedundancy"],
560 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
561 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
562 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
563 mpWorkers = __Function["NumberOfProcesses"],
564 mfEnabled = __Function["withmfEnabled"],
566 self.__FO["Direct"] = Operator(
568 fromMethod = FDA.DirectOperator,
569 reducingMemoryUse = __reduceM,
570 avoidingRedundancy = __avoidRC,
571 inputAsMultiFunction = inputAsMF,
572 extraArguments = self.__extraArgs,
573 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
574 self.__FO["Tangent"] = Operator(
575 name = self.__name+"Tangent",
576 fromMethod = FDA.TangentOperator,
577 reducingMemoryUse = __reduceM,
578 avoidingRedundancy = __avoidRC,
579 inputAsMultiFunction = inputAsMF,
580 extraArguments = self.__extraArgs )
581 self.__FO["Adjoint"] = Operator(
582 name = self.__name+"Adjoint",
583 fromMethod = FDA.AdjointOperator,
584 reducingMemoryUse = __reduceM,
585 avoidingRedundancy = __avoidRC,
586 inputAsMultiFunction = inputAsMF,
587 extraArguments = self.__extraArgs )
# Case 2: the three functions were given explicitly.
588 elif isinstance(__Function, dict) and \
589 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
590 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
591 self.__FO["Direct"] = Operator(
593 fromMethod = __Function["Direct"],
594 reducingMemoryUse = __reduceM,
595 avoidingRedundancy = __avoidRC,
596 inputAsMultiFunction = inputAsMF,
597 extraArguments = self.__extraArgs,
598 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
599 self.__FO["Tangent"] = Operator(
600 name = self.__name+"Tangent",
601 fromMethod = __Function["Tangent"],
602 reducingMemoryUse = __reduceM,
603 avoidingRedundancy = __avoidRC,
604 inputAsMultiFunction = inputAsMF,
605 extraArguments = self.__extraArgs )
606 self.__FO["Adjoint"] = Operator(
607 name = self.__name+"Adjoint",
608 fromMethod = __Function["Adjoint"],
609 reducingMemoryUse = __reduceM,
610 avoidingRedundancy = __avoidRC,
611 inputAsMultiFunction = inputAsMF,
612 extraArguments = self.__extraArgs )
# Case 3: matrix form -- Tangent is the matrix itself, Adjoint its transpose.
613 elif asMatrix is not None:
614 if isinstance(__Matrix, str):
615 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
616 __matrice = numpy.asarray( __Matrix, dtype=float )
617 self.__FO["Direct"] = Operator(
619 fromMatrix = __matrice,
620 reducingMemoryUse = __reduceM,
621 avoidingRedundancy = __avoidRC,
622 inputAsMultiFunction = inputAsMF,
623 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
624 self.__FO["Tangent"] = Operator(
625 name = self.__name+"Tangent",
626 fromMatrix = __matrice,
627 reducingMemoryUse = __reduceM,
628 avoidingRedundancy = __avoidRC,
629 inputAsMultiFunction = inputAsMF )
630 self.__FO["Adjoint"] = Operator(
631 name = self.__name+"Adjoint",
632 fromMatrix = __matrice.T,
633 reducingMemoryUse = __reduceM,
634 avoidingRedundancy = __avoidRC,
635 inputAsMultiFunction = inputAsMF )
# Fallback (the "else: raise ValueError(" line is elided): nothing usable
# was provided.
639 "The %s object is improperly defined or undefined,"%self.__name+\
640 " it requires at minima either a matrix, a Direct operator for"+\
641 " approximate derivatives or a Tangent/Adjoint operators pair."+\
642 " Please check your operator input.")
# Store the operating point(s), reshaped as column vectors.
644 if __appliedInX is not None:
645 self.__FO["AppliedInX"] = {}
646 for key in __appliedInX:
647 if isinstance(__appliedInX[key], str):
648 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
649 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
651 self.__FO["AppliedInX"] = None
# __repr__/__str__ fragments (their "def" lines are elided).
657 "x.__repr__() <==> repr(x)"
658 return repr(self.__FO)
661 "x.__str__() <==> str(x)"
662 return str(self.__FO)
664 # ==============================================================================
665 class Algorithm(object):
667 Classe générale d'interface de type algorithme
669 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
670 d'assimilation, en fournissant un container (dictionnaire) de variables
671 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
673 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
676 "_name", "_parameters", "__internal_state", "__required_parameters",
677 "_m", "__variable_names_not_public", "__canonical_parameter_name",
678 "__canonical_stored_name", "__replace_by_the_new_name",
682 def __init__(self, name):
684 L'initialisation présente permet de fabriquer des variables de stockage
685 disponibles de manière générique dans les algorithmes élémentaires. Ces
686 variables de stockage sont ensuite conservées dans un dictionnaire
687 interne à l'objet, mais auquel on accède par la méthode "get".
689 Les variables prévues sont :
690 - APosterioriCorrelations : matrice de corrélations de la matrice A
691 - APosterioriCovariance : matrice de covariances a posteriori : A
692 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
693 - APosterioriVariances : vecteur des variances de la matrice A
694 - Analysis : vecteur d'analyse : Xa
695 - BMA : Background moins Analysis : Xa - Xb
696 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
697 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
698 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
699 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
700 - CostFunctionJo : partie observations de la fonction-coût : Jo
701 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
702 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
703 - CurrentOptimum : état optimal courant lors d'itérations
704 - CurrentState : état courant lors d'itérations
705 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
706 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
707 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
708 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
709 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
710 - ForecastState : état prédit courant lors d'itérations
711 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
712 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
713 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
714 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
715 - Innovation : l'innovation : d = Y - H(X)
716 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
717 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
718 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
719 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
720 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
721 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
722 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
723 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
724 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
725 - KalmanGainAtOptimum : gain de Kalman à l'optimum
726 - MahalanobisConsistency : indicateur de consistance des covariances
727 - OMA : Observation moins Analyse : Y - Xa
728 - OMB : Observation moins Background : Y - Xb
729 - Residu : dans le cas des algorithmes de vérification
730 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
731 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
732 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
733 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
734 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
735 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
736 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
737 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
738 On peut rajouter des variables à stocker dans l'initialisation de
739 l'algorithme élémentaire qui va hériter de cette classe
741 logging.debug("%s Initialisation", str(name))
742 self._m = PlatformInfo.SystemUsage()
744 self._name = str( name )
745 self._parameters = {"StoreSupplementaryCalculations":[]}
746 self.__internal_state = {}
747 self.__required_parameters = {}
748 self.__required_inputs = {
749 "RequiredInputValues":{"mandatory":(), "optional":()},
750 "ClassificationTags":[],
752 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
753 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
754 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
755 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
757 self.StoredVariables = {}
758 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
759 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
760 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
761 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
762 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
763 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
764 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
765 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
766 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
767 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
768 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
769 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
770 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
771 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
772 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
773 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
774 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
775 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
776 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
777 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
778 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
779 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
780 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
781 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
782 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
783 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
784 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
785 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
786 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
787 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
788 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
789 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
790 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
791 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
792 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
793 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
794 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
795 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
796 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
797 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
798 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
799 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
800 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
801 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
802 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
803 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
804 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
805 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
806 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
807 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
808 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
809 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
810 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
811 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
813 for k in self.StoredVariables:
814 self.__canonical_stored_name[k.lower()] = k
816 for k, v in self.__variable_names_not_public.items():
817 self.__canonical_parameter_name[k.lower()] = k
818 self.__canonical_parameter_name["algorithm"] = "Algorithm"
819 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# NOTE(review): this listing was extracted with original file line numbers fused
# into each line and with some lines missing (gaps in the embedded numbering);
# the code is kept byte-identical below — only review comments are added.
# Purpose: generic pre-run step shared by all algorithms. Merges user Parameters
# with declared defaults, then checks presence (and logs sizes) of input vectors
# (Xb, Y, U), covariance matrices (B, R, Q) and operators (HO, EM, CM), then
# normalizes "Bounds" and "InitializationPoint", applies a TNC workaround, and
# sets optimizer verbosity flags from the logging level.
821     def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
823         logging.debug("%s Lancement", self._name)
824         logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
825         self._getTimeState(reset=True)
827         # Mise à jour des paramètres internes avec le contenu de Parameters, en
828         # reprenant les valeurs par défauts pour toutes celles non définies
829         self.__setParameters(Parameters, reset=True)
830         for k, v in self.__variable_names_not_public.items():
831             if k not in self._parameters: self.__setParameters( {k:v} )
# Local helper: raises when a mandatory vector is absent, otherwise logs its
# status and (when set) its flattened size.
833         # Corrections et compléments des vecteurs
834         def __test_vvalue(argument, variable, argname, symbol=None):
835             if symbol is None: symbol = variable
837             if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
838                 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
839             elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
840                 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
842                 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
844             if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
845                 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
846             elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
847                 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
850                     "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
851                     self._name,argname,symbol,numpy.array(argument).size))
853         __test_vvalue( Xb, "Xb", "Background or initial state" )
854         __test_vvalue( Y, "Y", "Observation" )
855         __test_vvalue( U, "U", "Control" )
# Same checking pattern for covariance matrices (presence only, no size check).
857         # Corrections et compléments des covariances
858         def __test_cvalue(argument, variable, argname, symbol=None):
859             if symbol is None: symbol = variable
861             if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
862                 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
863             elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
864                 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
866                 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
868             if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
869                 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
870             elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
871                 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
873                 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
875         __test_cvalue( B, "B", "Background" )
876         __test_cvalue( R, "R", "Observation" )
877         __test_cvalue( Q, "Q", "Evolution" )
# Same checking pattern for operators; an empty dict counts as "not set".
879         # Corrections et compléments des opérateurs
880         def __test_ovalue(argument, variable, argname, symbol=None):
881             if symbol is None: symbol = variable
882             if argument is None or (isinstance(argument,dict) and len(argument)==0):
883                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
884                     raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
885                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
886                     logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
888                     logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
890                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
891                     logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
892                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
893                     logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
895                     logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
897         __test_ovalue( HO, "HO", "Observation", "H" )
898         __test_ovalue( EM, "EM", "Evolution", "M" )
899         __test_ovalue( CM, "CM", "Control Model", "C" )
# "Bounds" is forced to None when absent/empty; "StateBoundsForQuantiles" is
# deliberately NOT defaulted (see the French note below).
901         # Corrections et compléments des bornes
902         if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
903             logging.debug("%s Bounds taken into account"%(self._name,))
905             self._parameters["Bounds"] = None
906         if ("StateBoundsForQuantiles" in self._parameters) \
907             and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
908             and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
909             logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
910             # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
# A forced initial point must have the same flattened size as the background Xb;
# otherwise Xb itself (flattened) is used as the initialization point.
912         # Corrections et compléments de l'initialisation en X
913         if "InitializationPoint" in self._parameters:
915             if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
916                 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
917                     raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
918                         %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
919                 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
921                 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
923             if self._parameters["InitializationPoint"] is None:
924                 raise ValueError("Forced initial point can not be set without any given Background or required value")
# Workaround: the TNC minimizer needs internal variables stored to recover the minimum.
926         # Correction pour pallier a un bug de TNC sur le retour du Minimum
927         if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
928             self.setParameterValue("StoreInternalVariables",True)
# Optimizer print/message flags follow the global logging verbosity.
930         # Verbosité et logging
931         if logging.getLogger().level < logging.WARNING:
932             self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
933             self._parameters["optmessages"] = 15
935             self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
936             self._parameters["optmessages"] = 0
# NOTE(review): damaged listing — line numbers fused into content, some lines
# missing; code kept byte-identical, comments only added.
# Purpose: generic post-run step. Derives a-posteriori variances, standard
# deviations and correlations from each stored "APosterioriCovariance" matrix
# when requested, then logs operator call/cache counts, memory use and timings.
940     def _post_run(self,_oH=None):
942         if ("StoreSupplementaryCalculations" in self._parameters) and \
943             "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
944             for _A in self.StoredVariables["APosterioriCovariance"]:
945                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
946                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
947                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
948                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
949                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 . A . D^-1/2 with D = diag(A).
950                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
951                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
952                     self.StoredVariables["APosterioriCorrelations"].store( _C )
953         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
955                 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
956                 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
958                 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
959                 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
960         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
961         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
962         logging.debug("%s Terminé", self._name)
965 def _toStore(self, key):
966 "True if in StoreSupplementaryCalculations, else False"
967 return key in self._parameters["StoreSupplementaryCalculations"]
# NOTE(review): damaged listing (fused line numbers, missing lines) — code kept
# byte-identical. Purpose: return one stored variable (looked up through the
# case-insensitive canonical name map) or the whole StoredVariables dict when
# no key is given.
969     def get(self, key=None):
971         Renvoie l'une des variables stockées identifiée par la clé, ou le
972         dictionnaire de l'ensemble des variables disponibles en l'absence de
973         clé. Ce sont directement les variables sous forme objet qui sont
974         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
975         des classes de persistance.
978             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
980             return self.StoredVariables
# NOTE(review): damaged listing — the `def` lines of keys()/pop() below are
# among the missing lines; code kept byte-identical, comments only added.
# __contains__: case-insensitive membership test against the stored variables.
982     def __contains__(self, key=None):
983         "D.__contains__(k) -> True if D has a key k, else False"
984         if key is None or key.lower() not in self.__canonical_stored_name:
987             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# keys(): delegates to the StoredVariables dict when it exists.
990         "D.keys() -> list of D's keys"
991         if hasattr(self, "StoredVariables"):
992             return self.StoredVariables.keys()
# pop(): dict-like pop through the canonical (lower-cased) name map.
997         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
998         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
999             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
1004                 raise TypeError("pop expected at least 1 arguments, got 0")
1005             "If key is not found, d is returned if given, otherwise KeyError is raised"
# NOTE(review): damaged listing (docstring delimiters missing) — code kept
# byte-identical. Abstract entry point: every concrete algorithm must override
# run() with its elementary calculation; the base version always raises.
1011     def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
1013         Doit implémenter l'opération élémentaire de calcul algorithmique.
1015         raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
# NOTE(review): damaged listing — the parameter list of this `def` and several
# dict entries ("minval"/"maxval", presumably — TODO confirm) are among the
# missing lines; code kept byte-identical, comments only added.
# Purpose: register a required parameter with its default, typecast and allowed
# values, maintain the case-insensitive canonical name map, and register the
# deprecated old name (when given) for transparent replacement.
1017     def defineRequiredParameter(self,
1029         Permet de définir dans l'algorithme des paramètres requis et leurs
1030         caractéristiques par défaut.
1033             raise ValueError("A name is mandatory to define a required parameter.")
1035         self.__required_parameters[name] = {
1036             "default"  : default,
1037             "typecast" : typecast,
1040             "listval"  : listval,
1041             "listadv"  : listadv,
1042             "message"  : message,
1043             "oldname"  : oldname,
1045         self.__canonical_parameter_name[name.lower()] = name
1046         if oldname is not None:
1047             self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1048             self.__replace_by_the_new_name[oldname.lower()] = name
# Logs the default by evaluating it through setParameterValue(name).
1049         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# NOTE(review): damaged listing (branch lines missing) — code kept byte-identical.
# Purpose: return the sorted required-parameter names, or (presumably when
# noDetails is False — TODO confirm, the branch line is missing) the full dict.
1051     def getRequiredParameters(self, noDetails=True):
1053         Renvoie la liste des noms de paramètres requis ou directement le
1054         dictionnaire des paramètres requis.
1057             return sorted(self.__required_parameters.keys())
1059             return self.__required_parameters
# NOTE(review): damaged listing — several lines are missing (the `return` for
# the None/None case, the try/except around the typecast, the inner `for v`
# loop header); code kept byte-identical, comments only added.
# Purpose: resolve a required parameter's value in a controlled way — apply the
# typecast, then enforce minval/maxval bounds and listval/listadv membership.
1061     def setParameterValue(self, name=None, value=None):
1063         Renvoie la valeur d'un paramètre requis de manière contrôlée
1065         __k = self.__canonical_parameter_name[name.lower()]
1066         default  = self.__required_parameters[__k]["default"]
1067         typecast = self.__required_parameters[__k]["typecast"]
1068         minval   = self.__required_parameters[__k]["minval"]
1069         maxval   = self.__required_parameters[__k]["maxval"]
1070         listval  = self.__required_parameters[__k]["listval"]
1071         listadv  = self.__required_parameters[__k]["listadv"]
1073         if value is None and default is None:
1075         elif value is None and default is not None:
1076             if typecast is None: __val = default
1077             else: __val = typecast( default )
1079             if typecast is None: __val = value
1082                 __val = typecast( value )
1084                 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bounds are checked element-wise through numpy, so sequences are supported.
1086         if minval is not None and (numpy.array(__val, float) < minval).any():
1087             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1088         if maxval is not None and (numpy.array(__val, float) > maxval).any():
1089             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Membership: each element (for sequence values) or the scalar itself must be
# in listval (public) or listadv (advanced, undocumented) when either is given.
1090         if listval is not None or listadv is not None:
1091             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1093                     if listval is not None and v in listval: continue
1094                     elif listadv is not None and v in listadv: continue
1096                         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1097             elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1098                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# NOTE(review): damaged listing (docstring delimiters missing) — code kept
# byte-identical. Three small accessors on self.__required_inputs:
# requireInputArguments(): record which run() inputs are mandatory/optional.
1102     def requireInputArguments(self, mandatory=(), optional=()):
1104         Permet d'imposer des arguments de calcul requis en entrée.
1106         self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1107         self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
# getInputArguments(): return the (mandatory, optional) tuples recorded above.
1109     def getInputArguments(self):
1111         Permet d'obtenir les listes des arguments de calcul requis en entrée.
1113         return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# setAttributes(): extend and return the classification tag list (called with
# no argument it simply returns the current list).
1115     def setAttributes(self, tags=()):
1117         Permet d'adjoindre des attributs comme les tags de classification.
1118         Renvoie la liste actuelle dans tous les cas.
1120         self.__required_inputs["ClassificationTags"].extend( tags )
1121         return self.__required_inputs["ClassificationTags"]
# NOTE(review): damaged listing — the reset branch and some `else:` lines are
# missing; code kept byte-identical, comments only added.
# Purpose: merge a user dict into self._parameters, map user-supplied key
# spellings to canonical names, warn about deprecated old names, then validate
# every declared parameter through setParameterValue and log the result.
# NB: the mutable default `fromDico={}` is never modified here, only read.
1123     def __setParameters(self, fromDico={}, reset=False):
1125         Permet de stocker les paramètres reçus dans le dictionnaire interne.
1127         self._parameters.update( fromDico )
1128         __inverse_fromDico_keys = {}
1129         for k in fromDico.keys():
1130             if k.lower() in self.__canonical_parameter_name:
1131                 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1132         #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1133         __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Deprecation pass: any user key registered as an old name triggers a FutureWarning.
1135         for k in __inverse_fromDico_keys.values():
1136             if k.lower() in self.__replace_by_the_new_name:
1137                 __newk = self.__replace_by_the_new_name[k.lower()]
1138                 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1139                 __msg += " Please update your code."
1140                 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Validation pass: user value when given, declared default otherwise.
1142         for k in self.__required_parameters.keys():
1143             if k in __canonic_fromDico_keys:
1144                 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1146                 self._parameters[k] = self.setParameterValue(k)
# Long sequences are logged by length only, to keep the debug log readable.
1149             if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1150                 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1152                 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# NOTE(review): damaged listing (docstring delimiters and some branch lines
# missing) — code kept byte-identical, comments only added.
# _setInternalState: store named variables forming the algorithm's internal
# state; `reset=True` clears the dict first, then a single (key, value) pair
# and/or a whole dict can be merged in. NB: `fromDico={}` is only read.
1154     def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1156         Permet de stocker des variables nommées constituant l'état interne
1158         if reset: # Vide le dictionnaire préalablement
1159             self.__internal_state = {}
1160         if key is not None and value is not None:
1161             self.__internal_state[key] = value
1162         self.__internal_state.update( dict(fromDico) )
# _getInternalState: one named entry when the key exists, else the whole dict.
1164     def _getInternalState(self, key=None):
1166         Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1168         if key is not None and key in self.__internal_state:
1169             return self.__internal_state[key]
1171             return self.__internal_state
# _getTimeState: with reset, records t0 via time.process_time()/perf_counter();
# otherwise returns (cpu, elapsed) seconds since the recorded t0.
1173     def _getTimeState(self, reset=False):
1175         Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1178             self.__initial_cpu_time      = time.process_time()
1179             self.__initial_elapsed_time  = time.perf_counter()
1182             self.__cpu_time     = time.process_time() - self.__initial_cpu_time
1183             self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1184             return self.__cpu_time, self.__elapsed_time
# _StopOnTimeLimit: stop criterion comparing current cpu/elapsed times to the
# "MaximumCpuTime"/"MaximumElapsedTime" parameters; the final return (with the
# optional reason string) is among the missing lines.
1186     def _StopOnTimeLimit(self, X=None, withReason=False):
1187         "Stop criteria on time limit: True/False [+ Reason]"
1188         c, e = self._getTimeState()
1189         if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1190             __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1191         elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1192             __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1194             __SC, __SR = False, ""
1200 # ==============================================================================
# NOTE(review): damaged listing — __slots__ continuation lines and several
# __init__/get lines are missing; code kept byte-identical, comments only added.
# Lightweight stand-in for "Algorithm": provides the same storage interface
# (StoredVariables, _toStore, get) without the input checking machinery.
1201 class PartialAlgorithm(object):
1203     Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1204     action avancée comme la vérification . Pour les méthodes reprises ici,
1205     le fonctionnement est identique à celles de la classe "Algorithm".
1208         "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
# Constructor: registers the minimal set of stored variables and builds the
# case-insensitive canonical name map.
1211     def __init__(self, name):
1212         self._name = str( name )
1213         self._parameters = {"StoreSupplementaryCalculations":[]}
1215         self.StoredVariables = {}
1216         self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1217         self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1218         self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1219         self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1220         self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1221         self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
1223         self.__canonical_stored_name = {}
1224         for k in self.StoredVariables:
1225             self.__canonical_stored_name[k.lower()] = k
# Same contract as Algorithm._toStore.
1227     def _toStore(self, key):
1228         "True if in StoreSupplementaryCalculations, else False"
1229         return key in self._parameters["StoreSupplementaryCalculations"]
# Same contract as Algorithm.get: one stored variable by (case-insensitive)
# key, or the whole dict when no key is given.
1231     def get(self, key=None):
1233         Renvoie l'une des variables stockées identifiée par la clé, ou le
1234         dictionnaire de l'ensemble des variables disponibles en l'absence de
1235         clé. Ce sont directement les variables sous forme objet qui sont
1236         renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1237         des classes de persistance.
1240             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1242             return self.StoredVariables
1244 # ==============================================================================
# NOTE(review): damaged listing — the `def __init__` line, several parameter
# lines and some assignments are missing; code kept byte-identical.
# General interface class binding an algorithm name to its parameters and
# calculation inputs; the constructor accepts the algorithm either directly
# (asAlgorithm) or through a script to import (asScript).
1245 class AlgorithmAndParameters(object):
1247     Classe générale d'interface d'action pour l'algorithme et ses paramètres
1250         "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1251         "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1252         "__Q", "__variable_names_not_public",
1256             name = "GenericAlgorithm",
1263         self.__name = str(name)
1267         self.__algorithm = {}
1268         self.__algorithmFile = None
1269         self.__algorithmName = None
1271         self.updateParameters( asDict, asScript )
# The algorithm can come from a script file; the direct argument wins otherwise.
1273         if asAlgorithm is None and asScript is not None:
1274             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1276             __Algo = asAlgorithm
1278         if __Algo is not None:
1279             self.__A = str(__Algo)
1280             self.__P.update( {"Algorithm":self.__A} )
1282             self.__setAlgorithm( self.__A )
1284         self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# NOTE(review): damaged listing (parameter lines and the else branch missing) —
# code kept byte-identical. Purpose: merge parameters into self.__P, taking the
# dict either directly (asDict) or from a script's "Parameters" value.
1286     def updateParameters(self,
1290         "Mise à jour des paramètres"
1291         if asDict is None and asScript is not None:
1292             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1296         if __Dict is not None:
1297             self.__P.update( dict(__Dict) )
# NOTE(review): damaged listing — several lines are missing, including most of
# the keyword arguments of the final __algorithm.run(...) call; code kept
# byte-identical, comments only added.
# Purpose: unpack the calculation objects from the asDictAO dictionary (each
# entry may wrap its object behind a getO() accessor), validate shapes, then
# launch the algorithm. The operator cache is cleared first.
1299     def executePythonScheme(self, asDictAO = None):
1300         "Permet de lancer le calcul d'assimilation"
1301         Operator.CM.clearCache()
1303         if not isinstance(asDictAO, dict):
1304             raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1305         if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1306         elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1307         else: self.__Xb = None
1308         if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1309         else: self.__Y = asDictAO["Observation"]
1310         if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1311         else: self.__U = asDictAO["ControlInput"]
1312         if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1313         else: self.__HO = asDictAO["ObservationOperator"]
1314         if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1315         else: self.__EM = asDictAO["EvolutionModel"]
1316         if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1317         else: self.__CM = asDictAO["ControlModel"]
1318         self.__B = asDictAO["BackgroundError"]
1319         self.__R = asDictAO["ObservationError"]
1320         self.__Q = asDictAO["EvolutionError"]
# Consistency of all shapes is checked before running.
1322         self.__shape_validate()
1324         self.__algorithm.run(
1334             Parameters = self.__P,
# NOTE(review): damaged listing — import lines, try/except structure and the
# final execution/return lines are partly missing; code kept byte-identical.
# Purpose: run an assimilation calculation described by a YACS XML schema file:
# check the SALOME/YACS/ADAO environment, load and validate the schema, then
# execute it and report any error state.
1338     def executeYACSScheme(self, FileName=None):
1339         "Permet de lancer le calcul d'assimilation"
1340         if FileName is None or not os.path.exists(FileName):
1341             raise ValueError("a YACS file name has to be given for YACS execution.\n")
1343             __file    = os.path.abspath(FileName)
1344             logging.debug("The YACS file name is \"%s\"."%__file)
1345         if not PlatformInfo.has_salome or \
1346             not PlatformInfo.has_yacs or \
1347             not PlatformInfo.has_adao:
1348             raise ImportError("\n\n"+\
1349                 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1350                 "Please load the right environnement before trying to use it.\n")
1353         import SALOMERuntime
1355         SALOMERuntime.RuntimeSALOME_setRuntime()
1357         r = pilot.getRuntime()
1358         xmlLoader = loader.YACSLoader()
1359         xmlLoader.registerProcCataLoader()
1361             catalogAd = r.loadCatalog("proc", __file)
1362             r.addCatalog(catalogAd)
# Loading and validation of the schema, with explicit diagnostics on failure.
1367             p = xmlLoader.load(__file)
1368         except IOError as ex:
1369             print("The YACS XML schema file can not be loaded: %s"%(ex,))
1371         logger = p.getLogger("parser")
1372         if not logger.isEmpty():
1373             print("The imported YACS XML schema has errors on parsing:")
1374             print(logger.getStr())
1377             print("The YACS XML schema is not valid and will not be executed:")
1378             print(p.getErrorReport())
1380         info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1381         p.checkConsistency(info)
1382         if info.areWarningsOrErrors():
1383             print("The YACS XML schema is not coherent and will not be executed:")
1384             print(info.getGlobalRepr())
1386         e = pilot.ExecutorSwig()
1388         if p.getEffectiveState() != pilot.DONE:
1389             print(p.getErrorReport())
# NOTE(review): damaged listing (the else branch header and final return are
# missing) — code kept byte-identical. Purpose: fetch a stored algorithm
# variable first, then a parameter; with no match, all parameters except the
# non-public internal names are returned.
1393     def get(self, key = None):
1394         "Vérifie l'existence d'une clé de variable ou de paramètres"
1395         if key in self.__algorithm:
1396             return self.__algorithm.get( key )
1397         elif key in self.__P:
1398             return self.__P[key]
1400             allvariables = self.__P
1401             for k in self.__variable_names_not_public: allvariables.pop(k, None)
1404 def pop(self, k, d):
1405 "Necessaire pour le pickling"
1406 return self.__algorithm.pop(k, d)
1408 def getAlgorithmRequiredParameters(self, noDetails=True):
1409 "Renvoie la liste des paramètres requis selon l'algorithme"
1410 return self.__algorithm.getRequiredParameters(noDetails)
1412 def getAlgorithmInputArguments(self):
1413 "Renvoie la liste des entrées requises selon l'algorithme"
1414 return self.__algorithm.getInputArguments()
1416 def getAlgorithmAttributes(self):
1417 "Renvoie la liste des attributs selon l'algorithme"
1418 return self.__algorithm.setAttributes()
# NOTE(review): damaged listing — the Scheduler/HookFunction keyword lines of
# both setDataObserver/removeDataObserver calls are among the missing lines;
# code kept byte-identical, comments only added.
# setObserver: attach observer __O (with parameters __I, presumably scheduler
# __S — TODO confirm against the full file) to stored variable __V.
1420     def setObserver(self, __V, __O, __I, __S):
1421         if self.__algorithm is None \
1422             or isinstance(self.__algorithm, dict) \
1423             or not hasattr(self.__algorithm,"StoredVariables"):
1424             raise ValueError("No observer can be build before choosing an algorithm.")
1425         if __V not in self.__algorithm:
1426             raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1428             self.__algorithm.StoredVariables[ __V ].setDataObserver(
1431                 HookParameters = __I,
# removeObserver: detach observer __O from stored variable __V (__A presumably
# selects removing all observers — TODO confirm).
1434     def removeObserver(self, __V, __O, __A = False):
1435         if self.__algorithm is None \
1436             or isinstance(self.__algorithm, dict) \
1437             or not hasattr(self.__algorithm,"StoredVariables"):
1438             raise ValueError("No observer can be removed before choosing an algorithm.")
1439         if __V not in self.__algorithm:
1440             raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1442             return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# NOTE(review): damaged listing — the two early `return False` lines and the
# `def keys` line below are missing; code kept byte-identical.
# hasObserver: True when stored variable __V exists and has a data observer.
1447     def hasObserver(self, __V):
1448         if self.__algorithm is None \
1449             or isinstance(self.__algorithm, dict) \
1450             or not hasattr(self.__algorithm,"StoredVariables"):
1452         if __V not in self.__algorithm:
1454         return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys (def line missing): merged algorithm + parameter keys, with the internal
# non-public names filtered out.
1457         __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1458         for k in self.__variable_names_not_public:
1459             if k in __allvariables: __allvariables.remove(k)
1460         return __allvariables
1462 def __contains__(self, key=None):
1463 "D.__contains__(k) -> True if D has a key k, else False"
1464 return key in self.__algorithm or key in self.__P
# NOTE(review): damaged listing — the `def __repr__`/`def __str__` lines are
# among the missing lines; code kept byte-identical. Both render the algorithm
# name (__A) followed by the parameter dict (__P).
1467         "x.__repr__() <==> repr(x)"
1468         return repr(self.__A)+", "+repr(self.__P)
1471         "x.__str__() <==> str(x)"
1472         return str(self.__A)+", "+str(self.__P)
# NOTE(review): damaged listing — the initial None check, module_path
# initialization, raise keywords and try: headers are among the missing lines;
# code kept byte-identical, comments only added.
# Purpose: locate the algorithm module `<choice>.py` under a "daAlgorithms"
# directory on sys.path, import it (temporarily prepending its directory to
# sys.path), check it defines ElementaryAlgorithm, and instantiate it. The
# choice is one-shot: it cannot be changed once set.
1474     def __setAlgorithm(self, choice = None ):
1476         Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1477         d'assimilation. L'argument est un champ caractère se rapportant au nom
1478         d'un algorithme réalisant l'opération sur les arguments fixes.
1481             raise ValueError("Error: algorithm choice has to be given")
1482         if self.__algorithmName is not None:
1483             raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1484         daDirectory = "daAlgorithms"
1486         # Recherche explicitement le fichier complet
1487         # ------------------------------------------
1489         for directory in sys.path:
1490             if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1491                 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1492         if module_path is None:
1494                 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1496         # Importe le fichier complet comme un module
1497         # ------------------------------------------
# sys.path is restored after the import whatever module was loaded.
1499             sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1500             self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1501             if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1502                 raise ImportError("this module does not define a valid elementary algorithm.")
1503             self.__algorithmName = str(choice)
1504             sys.path = sys_path_tmp ; del sys_path_tmp
1505         except ImportError as e:
1507                 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1509         # Instancie un objet du type élémentaire du fichier
1510         # -------------------------------------------------
1511         self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1514 def __shape_validate(self):
1516 Validation de la correspondance correcte des tailles des variables et
1517 des matrices s'il y en a.
1519 if self.__Xb is None: __Xb_shape = (0,)
1520 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1521 elif hasattr(self.__Xb,"shape"):
1522 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1523 else: __Xb_shape = self.__Xb.shape()
1524 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1526 if self.__Y is None: __Y_shape = (0,)
1527 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1528 elif hasattr(self.__Y,"shape"):
1529 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1530 else: __Y_shape = self.__Y.shape()
1531 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1533 if self.__U is None: __U_shape = (0,)
1534 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1535 elif hasattr(self.__U,"shape"):
1536 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1537 else: __U_shape = self.__U.shape()
1538 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1540 if self.__B is None: __B_shape = (0,0)
1541 elif hasattr(self.__B,"shape"):
1542 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1543 else: __B_shape = self.__B.shape()
1544 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1546 if self.__R is None: __R_shape = (0,0)
1547 elif hasattr(self.__R,"shape"):
1548 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1549 else: __R_shape = self.__R.shape()
1550 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1552 if self.__Q is None: __Q_shape = (0,0)
1553 elif hasattr(self.__Q,"shape"):
1554 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1555 else: __Q_shape = self.__Q.shape()
1556 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1558 if len(self.__HO) == 0: __HO_shape = (0,0)
1559 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1560 elif hasattr(self.__HO["Direct"],"shape"):
1561 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1562 else: __HO_shape = self.__HO["Direct"].shape()
1563 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1565 if len(self.__EM) == 0: __EM_shape = (0,0)
1566 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1567 elif hasattr(self.__EM["Direct"],"shape"):
1568 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1569 else: __EM_shape = self.__EM["Direct"].shape()
1570 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1572 if len(self.__CM) == 0: __CM_shape = (0,0)
1573 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1574 elif hasattr(self.__CM["Direct"],"shape"):
1575 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1576 else: __CM_shape = self.__CM["Direct"].shape()
1577 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1579 # Vérification des conditions
1580 # ---------------------------
1581 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1582 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1583 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1584 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1586 if not( min(__B_shape) == max(__B_shape) ):
1587 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1588 if not( min(__R_shape) == max(__R_shape) ):
1589 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1590 if not( min(__Q_shape) == max(__Q_shape) ):
1591 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1592 if not( min(__EM_shape) == max(__EM_shape) ):
1593 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1595 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1597 "Shape characteristic of observation operator (H)"+\
1598 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1599 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1601 "Shape characteristic of observation operator (H)"+\
1602 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1603 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1605 "Shape characteristic of observation operator (H)"+\
1606 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1607 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1609 "Shape characteristic of observation operator (H)"+\
1610 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1612 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1613 if self.__algorithmName in ["EnsembleBlue",]:
1614 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1615 self.__Xb = Persistence.OneVector("Background")
1616 for member in asPersistentVector:
1617 self.__Xb.store( numpy.asarray(member, dtype=float) )
1618 __Xb_shape = min(__B_shape)
1621 "Shape characteristic of a priori errors covariance matrix (B)"+\
1622 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1624 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1626 "Shape characteristic of observation errors covariance matrix (R)"+\
1627 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1629 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1631 "Shape characteristic of evolution model (EM)"+\
1632 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1634 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1636 "Shape characteristic of control model (CM)"+\
1637 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1639 if ("Bounds" in self.__P) \
1640 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1641 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1642 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1643 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1645 if ("StateBoundsForQuantiles" in self.__P) \
1646 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1647 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1648 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1649 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1653 # ==============================================================================
# NOTE(review): decimated listing — each line starts with its original source
# line number and interior lines are missing; comments describe only the
# visible fragments.  Restore the full file before editing code.
1654 class RegulationAndParameters(object):
1656     Classe générale d'interface d'action pour la régulation et ses paramètres
1658     __slots__ = ("__name", "__P")
1661         name               = "GenericRegulation",
# Constructor fragment: stores the name, then resolves the algorithm and the
# parameter dict either from direct arguments or from an imported script.
1668         self.__name       = str(name)
1671         if asAlgorithm is None and asScript is not None:
1672             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1674             __Algo = asAlgorithm
1676         if asDict is None and asScript is not None:
1677             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge whatever was found into the internal parameter store __P.
1681         if __Dict is not None:
1682             self.__P.update( dict(__Dict) )
1684         if __Algo is not None:
1685             self.__P.update( {"Algorithm":str(__Algo)} )
# Accessor: returns the stored value for a key (KeyError if absent, as shown).
1687     def get(self, key = None):
1688         "Vérifie l'existence d'une clé de variable ou de paramètres"
1690             return self.__P[key]
1694 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
1695 class DataObserver(object):
1697     Classe générale d'interface de type observer
1699     __slots__ = ("__name", "__V", "__O", "__I")
1702         name        = "GenericObserver",
1714         self.__name       = str(name)
# Normalise the observed variable name(s) into tuple __V and the matching
# info strings into tuple __I; rejects None and non-str/list inputs.
1719         if onVariable is None:
1720             raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1721         elif type(onVariable) in (tuple, list):
1722             self.__V = tuple(map( str, onVariable ))
1723             if withInfo is None:
1726                 self.__I = (str(withInfo),)*len(self.__V)
1727         elif isinstance(onVariable, str):
1728             self.__V = (onVariable,)
1729             if withInfo is None:
1730                 self.__I = (onVariable,)
1732                 self.__I = (str(withInfo),)
1734             raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable __O either comes in ready-made, or is built from a
# user script/template via UserScript + Observer2Func.
1736         if asObsObject is not None:
1737             self.__O = asObsObject
1739             __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1740             __Function = Observer2Func(__FunctionText)
1741             self.__O = __Function.getfunc()
# Attach the observer to each named variable of the algorithm object.
1743         for k in range(len(self.__V)):
1746             if ename not in withAlgo:
1747                 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1749                 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1752         "x.__repr__() <==> repr(x)"
1753         return repr(self.__V)+"\n"+repr(self.__O)
1756         "x.__str__() <==> str(x)"
1757         return str(self.__V)+"\n"+str(self.__O)
1759 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
1760 class UserScript(object):
1762     Classe générale d'interface de type texte de script utilisateur
1764     __slots__ = ("__name", "__F")
1767         name       = "GenericUserScript",
1774         self.__name       = str(name)
# Resolve the script text __F by priority: explicit string, then a named
# template (UserPostAnalysis/Observer families), then an imported script file.
1776         if asString is not None:
1778         elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1779             self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1780         elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1781             self.__F = Templates.ObserverTemplates[asTemplate]
1782         elif asScript is not None:
1783             self.__F = Interfaces.ImportFromScript(asScript).getstring()
# repr/str both expose the stored script text.
1788         "x.__repr__() <==> repr(x)"
1789         return repr(self.__F)
1792         "x.__str__() <==> str(x)"
1793         return str(self.__F)
1795 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
1796 class ExternalParameters(object):
1798     Classe générale d'interface pour le stockage des paramètres externes
1800     __slots__ = ("__name", "__P")
1803         name        = "GenericExternalParameters",
1809         self.__name = str(name)
1812         self.updateParameters( asDict, asScript )
# Merge new parameters into __P, optionally loading the dict from a script
# variable named "ExternalParameters".
1814     def updateParameters(self,
1818         "Mise à jour des paramètres"
1819         if asDict is None and asScript is not None:
1820             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1824         if __Dict is not None:
1825             self.__P.update( dict(__Dict) )
# Dict-like accessors delegating to the internal store __P.
1827     def get(self, key = None):
1829             return self.__P[key]
1831             return list(self.__P.keys())
1834         return list(self.__P.keys())
1836     def pop(self, k, d):
1837         return self.__P.pop(k, d)
1840         return self.__P.items()
1842     def __contains__(self, key=None):
1843         "D.__contains__(k) -> True if D has a key k, else False"
1844         return key in self.__P
1846 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
1847 class State(object):
1849     Classe générale d'interface de type état
1852         "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1857         name               = "GenericVector",
1859         asPersistentVector = None,
1865         toBeChecked        = False,
1868         Permet de définir un vecteur :
1869         - asVector : entrée des données, comme un vecteur compatible avec le
1870           constructeur de numpy.matrix, ou "True" si entrée par script.
1871         - asPersistentVector : entrée des données, comme une série de vecteurs
1872           compatible avec le constructeur de numpy.matrix, ou comme un objet de
1873           type Persistence, ou "True" si entrée par script.
1874         - asScript : si un script valide est donné contenant une variable
1875           nommée "name", la variable est de type "asVector" (par défaut) ou
1876           "asPersistentVector" selon que l'une de ces variables est placée à
1878         - asDataFile : si un ou plusieurs fichiers valides sont donnés
1879           contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1880           (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1881           nommée "name"), on récupère les colonnes et on les range ligne après
1882           ligne (colMajor=False, par défaut) ou colonne après colonne
1883           (colMajor=True). La variable résultante est de type "asVector" (par
1884           défaut) ou "asPersistentVector" selon que l'une de ces variables est
1887         self.__name       = str(name)
1888         self.__check      = bool(toBeChecked)
1892         self.__is_vector  = False
1893         self.__is_series  = False
# Input resolution: script import, data-file import, or direct arguments.
1895         if asScript is not None:
1896             __Vector, __Series = None, None
1897             if asPersistentVector:
1898                 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1900                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1901         elif asDataFile is not None:
1902             __Vector, __Series = None, None
1903             if asPersistentVector:
1904                 if colNames is not None:
1905                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1907                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Transpose to honour colMajor, with the orientation convention inverted for
# the "application/numpy.npz" format (as the two symmetric branches show).
1908             if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1909                 __Series = numpy.transpose(__Series)
1910             elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1911                 __Series = numpy.transpose(__Series)
1913                 if colNames is not None:
1914                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1916                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1918                     __Vector = numpy.ravel(__Vector, order = "F")
1920                     __Vector = numpy.ravel(__Vector, order = "C")
1922             __Vector, __Series = asVector, asPersistentVector
# Storage: single vector -> float column array; series -> Persistence.OneVector.
1924         if __Vector is not None:
1925             self.__is_vector = True
1926             if isinstance(__Vector, str):
1927                 __Vector = PlatformInfo.strvect2liststr( __Vector )
1928             self.__V         = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1929             self.shape       = self.__V.shape
1930             self.size        = self.__V.size
1931         elif __Series is not None:
1932             self.__is_series  = True
1933             if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1934                 self.__V = Persistence.OneVector(self.__name)
1935                 if isinstance(__Series, str):
1936                     __Series = PlatformInfo.strmatrix2liststr(__Series)
1937                 for member in __Series:
1938                     if isinstance(member, str):
1939                         member = PlatformInfo.strvect2liststr( member )
1940                     self.__V.store(numpy.asarray( member, dtype=float ))
# shape may be a tuple attribute or a callable depending on the stored object.
1943             if isinstance(self.__V.shape, (tuple, list)):
1944                 self.shape       = self.__V.shape
1946                 self.shape       = self.__V.shape()
1947             if len(self.shape) == 1:
1948                 self.shape       = (self.shape[0],1)
1949             self.size        = self.shape[0] * self.shape[1]
1952                 "The %s object is improperly defined or undefined,"%self.__name+\
1953                 " it requires at minima either a vector, a list/tuple of"+\
1954                 " vectors or a persistent object. Please check your vector input.")
1956         if scheduledBy is not None:
1957             self.__T = scheduledBy
# getO: returns (value, scheduler) when requested with a scheduler set.
1959     def getO(self, withScheduler=False):
1961             return self.__V, self.__T
1962         elif self.__T is None:
1968         "Vérification du type interne"
1969         return self.__is_vector
1972         "Vérification du type interne"
1973         return self.__is_series
1976         "x.__repr__() <==> repr(x)"
1977         return repr(self.__V)
1980         "x.__str__() <==> str(x)"
1981         return str(self.__V)
1983 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
1984 class Covariance(object):
1986     Classe générale d'interface de type covariance
1989         "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
1990         "__is_object", "shape", "size",
1994         name          = "GenericCovariance",
1995         asCovariance  = None,
1996         asEyeByScalar = None,
1997         asEyeByVector = None,
2000         toBeChecked   = False,
2003         Permet de définir une covariance :
2004         - asCovariance : entrée des données, comme une matrice compatible avec
2005           le constructeur de numpy.matrix
2006         - asEyeByScalar : entrée des données comme un seul scalaire de variance,
2007           multiplicatif d'une matrice de corrélation identité, aucune matrice
2008           n'étant donc explicitement à donner
2009         - asEyeByVector : entrée des données comme un seul vecteur de variance,
2010           à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
2011           n'étant donc explicitement à donner
2012         - asCovObject : entrée des données comme un objet python, qui a les
2013           methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
2014           "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
2015           "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2016         - toBeChecked : booléen indiquant si le caractère SDP de la matrice
2017           pleine doit être vérifié
2019         self.__name       = str(name)
2020         self.__check      = bool(toBeChecked)
# Exactly one of the four representation flags ends up True below.
2023         self.__is_scalar  = False
2024         self.__is_vector  = False
2025         self.__is_matrix  = False
2026         self.__is_object  = False
2028         if asScript is not None:
2029             __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2031                 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2033                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2035                 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2037                 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2039             __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar form: C is abs(scalar); string inputs are parsed first.
2041         if __Scalar is not None:
2042             if isinstance(__Scalar, str):
2043                 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2044                 if len(__Scalar) > 0: __Scalar = __Scalar[0]
2045             if numpy.array(__Scalar).size != 1:
2047                     " The diagonal multiplier given to define a sparse matrix is"+\
2048                     " not a unique scalar value.\n Its actual measured size is"+\
2049                     " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2050             self.__is_scalar = True
2051             self.__C         = numpy.abs( float(__Scalar) )
# Vector form: C is the abs diagonal; logical shape/size are those of the
# implied square matrix.
2054         elif __Vector is not None:
2055             if isinstance(__Vector, str):
2056                 __Vector = PlatformInfo.strvect2liststr( __Vector )
2057             self.__is_vector = True
2058             self.__C         = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2059             self.shape       = (self.__C.size,self.__C.size)
2060             self.size        = self.__C.size**2
2061         elif __Matrix is not None:
2062             self.__is_matrix = True
2063             self.__C         = numpy.matrix( __Matrix, float )
2064             self.shape       = self.__C.shape
2065             self.size        = self.__C.size
# Object form: duck-typed covariance, required operator interface checked here.
2066         elif __Object is not None:
2067             self.__is_object = True
2069             for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2070                 if not hasattr(self.__C,at):
2071                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2072             if hasattr(self.__C,"shape"):
2073                 self.shape       = self.__C.shape
2076             if hasattr(self.__C,"size"):
2077                 self.size        = self.__C.size
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
# __validate: sanity checks on the stored representation (set, square,
# positive entries; optional SPD check via Cholesky when checking is enabled
# or logging is verbose).
2085     def __validate(self):
2087         if self.__C is None:
2088             raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2089         if self.ismatrix() and min(self.shape) != max(self.shape):
2090             raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2091         if self.isobject() and min(self.shape) != max(self.shape):
2092             raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2093         if self.isscalar() and self.__C <= 0:
2094             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2095         if self.isvector() and (self.__C <= 0).any():
2096             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
2097         if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2099                 numpy.linalg.cholesky( self.__C )
2101                 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2102         if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2106                 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Representation predicates (isscalar/isvector/ismatrix/isobject bodies).
2109         "Vérification du type interne"
2110         return self.__is_scalar
2113         "Vérification du type interne"
2114         return self.__is_vector
2117         "Vérification du type interne"
2118         return self.__is_matrix
2121         "Vérification du type interne"
2122         return self.__is_object
# getI: inverse covariance, dispatched per representation; returns None when
# the object form has no "getI" (comment "Indispensable" kept from original).
2127             return Covariance(self.__name+"I", asCovariance  = numpy.linalg.inv(self.__C) )
2128         elif self.isvector():
2129             return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2130         elif self.isscalar():
2131             return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2132         elif self.isobject() and hasattr(self.__C,"getI"):
2133             return Covariance(self.__name+"I", asCovObject   = self.__C.getI() )
2135             return None # Indispensable
# getT: transpose (identity for vector/scalar diagonals).
2140             return Covariance(self.__name+"T", asCovariance  = self.__C.T )
2141         elif self.isvector():
2142             return Covariance(self.__name+"T", asEyeByVector = self.__C )
2143         elif self.isscalar():
2144             return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2145         elif self.isobject() and hasattr(self.__C,"getT"):
2146             return Covariance(self.__name+"T", asCovObject   = self.__C.getT() )
2148             raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# cholesky: lower-triangular factor; sqrt of the diagonal entries otherwise.
2151         "Décomposition de Cholesky"
2153             return Covariance(self.__name+"C", asCovariance  = numpy.linalg.cholesky(self.__C) )
2154         elif self.isvector():
2155             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2156         elif self.isscalar():
2157             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2158         elif self.isobject() and hasattr(self.__C,"cholesky"):
2159             return Covariance(self.__name+"C", asCovObject   = self.__C.cholesky() )
2161             raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# choleskyI: inverse of the Cholesky factor.
2163     def choleskyI(self):
2164         "Inversion de la décomposition de Cholesky"
2166             return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2167         elif self.isvector():
2168             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2169         elif self.isscalar():
2170             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2171         elif self.isobject() and hasattr(self.__C,"choleskyI"):
2172             return Covariance(self.__name+"H", asCovObject   = self.__C.choleskyI() )
2174             raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# sqrtm: matrix square root via scipy for the full-matrix form.
2177         "Racine carrée matricielle"
2180             return Covariance(self.__name+"C", asCovariance  = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2181         elif self.isvector():
2182             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2183         elif self.isscalar():
2184             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2185         elif self.isobject() and hasattr(self.__C,"sqrtm"):
2186             return Covariance(self.__name+"C", asCovObject   = self.__C.sqrtm() )
2188             raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# sqrtmI: inverse of the matrix square root.
2191         "Inversion de la racine carrée matricielle"
2194             return Covariance(self.__name+"H", asCovariance  = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2195         elif self.isvector():
2196             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2197         elif self.isscalar():
2198             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2199         elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2200             return Covariance(self.__name+"H", asCovObject   = self.__C.sqrtmI() )
2202             raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag: diagonal as a 1D array; the scalar form needs an explicit msize.
2204     def diag(self, msize=None):
2205         "Diagonale de la matrice"
2207             return numpy.diag(self.__C)
2208         elif self.isvector():
2210         elif self.isscalar():
2212                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2214                 return self.__C * numpy.ones(int(msize))
2215         elif self.isobject() and hasattr(self.__C,"diag"):
2216             return self.__C.diag()
2218             raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# trace: sum of diagonal entries; the scalar form needs an explicit msize.
2220     def trace(self, msize=None):
2221         "Trace de la matrice"
2223             return numpy.trace(self.__C)
2224         elif self.isvector():
2225             return float(numpy.sum(self.__C))
2226         elif self.isscalar():
2228                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2230                 return self.__C * int(msize)
2231         elif self.isobject():
2232             return self.__C.trace()
2234             raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# asfullmatrix: densify to a float ndarray whatever the representation.
2236     def asfullmatrix(self, msize=None):
2239             return numpy.asarray(self.__C, dtype=float)
2240         elif self.isvector():
2241             return numpy.asarray( numpy.diag(self.__C), dtype=float )
2242         elif self.isscalar():
2244                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2246                 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2247         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2248             return self.__C.asfullmatrix()
2250             raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2252     def assparsematrix(self):
2260         "x.__repr__() <==> repr(x)"
2261         return repr(self.__C)
2264         "x.__str__() <==> str(x)"
2265         return str(self.__C)
# __add__: dense sum for matrix/object; for vector/scalar only the diagonal
# of the operand is incremented in place.
2267     def __add__(self, other):
2268         "x.__add__(y) <==> x+y"
2269         if self.ismatrix() or self.isobject():
2270             return self.__C + numpy.asmatrix(other)
2271         elif self.isvector() or self.isscalar():
2272             _A = numpy.asarray(other)
2273             if len(_A.shape) == 1:
2274                 _A.reshape((-1,1))[::2] += self.__C
2276                 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2277             return numpy.asmatrix(_A)
def __radd__(self, other):
    """x.__radd__(y) <==> y+x

    Right-addition is not supported: the Covariance object has to be the
    left operand of "+".
    """
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    """x.__sub__(y) <==> x-y

    Matrix/object covariances return the dense difference C - other.
    Vector/scalar covariances overwrite the diagonal entries of the operand
    array with (C - diagonal) and return it as a matrix; note that this
    mutates *other* in place when it is already a float ndarray (asarray
    then aliases it).
    """
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    if self.isvector() or self.isscalar():
        operand = numpy.asarray(other)
        # View on the diagonal of the operand, assumed square and 2D here
        # (step = ncols + 1 over the flattened storage).
        step = operand.shape[1] + 1
        diagonal = operand.reshape(operand.size)[::step]
        diagonal[...] = self.__C - diagonal
        return numpy.asmatrix(operand)
def __rsub__(self, other):
    """x.__rsub__(y) <==> y-x

    Right-subtraction is not supported: the Covariance object has to be the
    left operand of "-".
    """
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
2297         "x.__neg__() <==> -x"
# __matmul__: "@" product dispatched on representation and operand kind;
# a flat operand of matching length is treated as a vector, otherwise as a
# matrix whose row count must match.
2300     def __matmul__(self, other):
2301         "x.__mul__(y) <==> x@y"
2302         if self.ismatrix() and isinstance(other, (int, float)):
2303             return numpy.asarray(self.__C) * other
2304         elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2305             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2306                 return numpy.ravel(self.__C @ numpy.ravel(other))
2307             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2308                 return numpy.asarray(self.__C) @ numpy.asarray(other)
2310                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2311         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2312             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2313                 return numpy.ravel(self.__C) * numpy.ravel(other)
2314             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2315                 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2317                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2318         elif self.isscalar() and isinstance(other,numpy.matrix):
2319             return numpy.asarray(self.__C * other)
2320         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2321             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2322                 return self.__C * numpy.ravel(other)
2324                 return self.__C * numpy.asarray(other)
2325         elif self.isobject():
2326             return self.__C.__matmul__(other)
2328             raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# __mul__: "*" product with numpy.matrix semantics (vector operands are
# promoted to column matrices).
2330     def __mul__(self, other):
2331         "x.__mul__(y) <==> x*y"
2332         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2333             return self.__C * other
2334         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2335             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2336                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2337             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2338                 return self.__C * numpy.asmatrix(other)
2341                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2342         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2343             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2344                 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2345             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2346                 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2349                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2350         elif self.isscalar() and isinstance(other,numpy.matrix):
2351             return self.__C * other
2352         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2353             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2354                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2356                 return self.__C * numpy.asmatrix(other)
2357         elif self.isobject():
2358             return self.__C.__mul__(other)
2360             raise NotImplementedError(
2361                 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# __rmatmul__: reflected "@" (other @ C); operand column count must match
# the covariance's row count.
2363     def __rmatmul__(self, other):
2364         "x.__rmul__(y) <==> y@x"
2365         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2366             return other * self.__C
2367         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2368             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2369                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2370             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2371                 return numpy.asmatrix(other) * self.__C
2374                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2375         elif self.isvector() and isinstance(other,numpy.matrix):
2376             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2377                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2378             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2379                 return numpy.asmatrix(numpy.array(other) * self.__C)
2382                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2383         elif self.isscalar() and isinstance(other,numpy.matrix):
2384             return other * self.__C
2385         elif self.isobject():
2386             return self.__C.__rmatmul__(other)
2388             raise NotImplementedError(
2389                 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__: reflected "*" (other * C), same dispatch as __rmatmul__ plus a
# scalar*float branch.
2391     def __rmul__(self, other):
2392         "x.__rmul__(y) <==> y*x"
2393         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2394             return other * self.__C
2395         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2396             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2397                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2398             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2399                 return numpy.asmatrix(other) * self.__C
2402                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2403         elif self.isvector() and isinstance(other,numpy.matrix):
2404             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2405                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2406             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2407                 return numpy.asmatrix(numpy.array(other) * self.__C)
2410                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2411         elif self.isscalar() and isinstance(other,numpy.matrix):
2412             return other * self.__C
2413         elif self.isscalar() and isinstance(other,float):
2414             return other * self.__C
2415         elif self.isobject():
2416             return self.__C.__rmul__(other)
2418             raise NotImplementedError(
2419                 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__: number of rows of the (square) covariance.
2422         "x.__len__() <==> len(x)"
2423         return self.shape[0]
2425 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
2426 class Observer2Func(object):
2428     Création d'une fonction d'observateur a partir de son texte
2430     __slots__ = ("__corps")
# Stores the observer source text; func/getfunc expose it as a callable
# (the execution of __corps is in the missing lines).
2432     def __init__(self, corps=""):
2433         self.__corps = corps
2434     def func(self,var,info):
2435         "Fonction d'observation"
2438         "Restitution du pointeur de fonction dans l'objet"
2441 # ==============================================================================
# NOTE(review): decimated listing — embedded numbers are original source
# lines; interior lines are missing.  Comments cover visible fragments only.
2442 class CaseLogger(object):
2444     Conservation des commandes de création d'un cas
2447         "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
2451     def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2452         self.__name     = str(__name)
2453         self.__objname  = str(__objname)
2454         self.__logSerie = []
2455         self.__switchoff = False
# Built-in output formatters (viewers) keyed by format name...
2457             "TUI" :Interfaces._TUIViewer,
2458             "SCD" :Interfaces._SCDViewer,
2459             "YACS":Interfaces._YACSViewer,
2460             "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2461             "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2462             "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# ...and input parsers (loaders); both maps are user-extensible below.
2465             "TUI" :Interfaces._TUIViewer,
2466             "COM" :Interfaces._COMViewer,
2468         if __addViewers is not None:
2469             self.__viewers.update(dict(__addViewers))
2470         if __addLoaders is not None:
2471             self.__loaders.update(dict(__addLoaders))
# register: append one command record unless logging is switched off;
# "self" is stripped from the captured keys.
2473     def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2474         "Enregistrement d'une commande individuelle"
2475         if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2476             if "self" in __keys: __keys.remove("self")
2477             self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2479                 self.__switchoff = True
2481                 self.__switchoff = False
# dump: render the recorded commands through the viewer for __format.
2483     def dump(self, __filename=None, __format="TUI", __upa=""):
2484         "Restitution normalisée des commandes"
2485         if __format in self.__viewers:
2486             __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2488             raise ValueError("Dumping as \"%s\" is not available"%__format)
2489         return __formater.dump(__filename, __upa)
# load: parse commands back through the loader for __format.
2491     def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2492         "Chargement normalisé des commandes"
2493         if __format in self.__loaders:
2494             __formater = self.__loaders[__format]()
2496             raise ValueError("Loading as \"%s\" is not available"%__format)
2497         return __formater.load(__filename, __content, __object)
2499 # ==============================================================================
# NOTE(review): fragment of the MultiFonction helper — the `def` header itself
# (orig. 2500-2501) is missing from this extract, along with the default
# worker-count branch (orig. 2517-2518), the multiprocessing-enabled guard and
# the construction of `_jobs` (orig. 2520-2531), and the final return
# (orig. ~2555). Comments describe only the visible logic.
2502 _extraArguments = None,
# Default function is identity, evaluated element-wise over the input series.
2503 _sFunction = lambda x: x,
# Function docstring (triple-quote delimiters lost in extraction): for an
# ordered list of input vectors, return the corresponding list of function values.
2508 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2509 correspondante de valeurs de la fonction en argument
2511 # Initial checks and definitions
2512 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
# The input series must be iterable; otherwise fail fast with a TypeError.
2513 if not PlatformInfo.isIterable( __xserie ):
2514 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker-count selection: a missing or non-positive _mpWorkers presumably
# falls back to a default (the branch at orig. 2517-2518 is missing here).
2516 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2519 __mpWorkers = int(_mpWorkers)
# Parallel path: map the function over the prepared jobs with a process pool.
# (_jobs is built in the missing lines orig. 2522-2531 — presumably from
# __xserie; confirm against the full file.)
2521 import multiprocessing
2532 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2533 with multiprocessing.Pool(__mpWorkers) as pool:
2534 __multiHX = pool.map( _sFunction, _jobs )
2537 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential path: evaluate point by point, dispatching on the type of
# _extraArguments (none / positional list-tuple-map / keyword dict).
2539 # logging.debug("MULTF Internal monoprocessing calculation begin")
2541 if _extraArguments is None:
2542 for __xvalue in __xserie:
2543 __multiHX.append( _sFunction( __xvalue ) )
2544 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2545 for __xvalue in __xserie:
2546 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2547 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2548 for __xvalue in __xserie:
2549 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
# Any other _extraArguments type is rejected explicitly.
2551 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2552 # logging.debug("MULTF Internal monoprocessing calculation end")
2554 # logging.debug("MULTF Internal multifonction calculations end")
2557 # ==============================================================================
# Self-test entry point: prints a banner when the module is run directly
# (any further diagnostic statements fall outside this extract).
2558 if __name__ == "__main__":
2559 print('\n AUTODIAGNOSTIC\n')