1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of computations, used to avoid evaluating
    an operator again at a point where it has already been evaluated.
    """
    # NOTE(review): this chunk is an incomplete view of the class — several
    # source lines (the __slots__/__init__ headers, some statements, and the
    # disable()/enable() method headers) are elided between the lines below.
    "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
    # Fragment of the __init__ signature: default redundancy tolerance and
    # default history length (a negative length means "auto-size at first storage").
    toleranceInRedundancy = 1.e-18,
    lengthOfRedundancy = -1,
        """The tolerance characteristics can be modified at creation."""
        self.__tolerBP = float(toleranceInRedundancy)  # relative tolerance of the cache-hit test
        self.__lengthOR = int(lengthOfRedundancy)      # maximal number of cached pairs
        self.__initlnOR = self.__lengthOR              # initial length, restored by enable()

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks for the existence of a computation corresponding to the value"
        # Scan the cached (point, value, norm, name) tuples, most recent first.
        for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
            # Reject when the operator name differs
            elif (str(oName) != self.__listOPCV[i][3]):
            # Reject when the sizes differ
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component comparison before paying for the full norm
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Cache hit: distance to the stored point is below tolerance
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx corresponding to the value x"
        if self.__lengthOR < 0:
            # First storage: size the history from the state dimension
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator is new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Evict the oldest entries when the cache exceeds its maximal length
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name

        # (fragment of disable(): keep the current length aside, switch cache off)
        self.__initlnOR = self.__lengthOR
        self.__enabled = False

        # (fragment of enable(): restore the initial length, switch cache on)
        self.__lengthOR = self.__initlnOR
        self.__enabled = True
119 # ==============================================================================
class Operator(object):
    """General interface class of simple operator type."""
    # NOTE(review): incomplete view — the class-level call counters, the
    # __slots__/__init__ headers and parts of several method bodies are
    # elided between the lines reproduced below.
    "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
    "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
    "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
    # Fragment of the __init__ signature (constructor defaults):
    name = "GenericOperator",
    avoidingRedundancy = True,
    reducingMemoryUse = False,
    inputAsMultiFunction = False,
    enableMultiProcess = False,
    extraArguments = None,
        """
        An object of this type is built by providing, through one of the two
        keywords, either a Python function or multi-function, or a
        (matrix — continuation elided in this view). The arguments are:
        - name : operator name
        - fromMethod : argument of Python function type
        - fromMatrix : argument suitable for the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - reducingMemoryUse : boolean forcing (or not) computations that use less
          (memory — continuation elided in this view)
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        # Per-instance call counters: matrix form, method form, cache hits
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Already a multi-function: use it as supplied
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function: wrap it so that it accepts a series of inputs
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            if isinstance(fromMatrix, str):
                # A matrix given as a string is converted to a list form first
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Switch the shared computation cache off
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # (fragment: branch keeping the cache off when caching is not allowed)
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue. This method simply applies the operator, each argument being
        expected a priori to be of the correct type.
        - the series arguments are:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are a mono or multi-value
        """
        if HValue is not None:
        # Validate that the wrapped input is iterable
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values supplied: store them instead of evaluating
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                    # Look the point up in the cache when redundancy avoidance is on
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                    __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: apply the stored matrix directly
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
            if len(_xserie)>0 and self.__Matrix is None:
                # Evaluate all not-yet-known points in one multi-function call
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                        "The user input multi-function doesn't seem to return a"+\
                        " result sequence, behaving like a mono-function. It has"+\
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise in a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to pairs
        (xValue, uValue). This method simply applies the operator, its
        argument being expected a priori to be of the correct type. If the
        uValue is None, the operator is assumed to apply to xValue only.
        - paires : the paired arguments are:
        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        - argsAsSerie : indicates whether the argument is a mono or multi-value
        """
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: the control U is ignored, apply the matrix to X
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # (fragment of the function case)
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    # A control is present: pass the whole (X,U) pair
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise in a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        simply applies the operator, its argument being expected a priori to
        be of the correct type. If the operator is linear because it is a
        matrix, then it is valid at any nominal point and xNominal can be
        anything. There is a single pair by default, and argsAsSerie allows
        one to indicate that the argument is multi-pairs.
        - paires : the paired arguments are:
        - xNominal : series of arguments giving the point where the
          operator is built in order to then be applied
        - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicates whether the argument is a mono or multi-value
        """
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant, apply the matrix
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise in a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Returns the operator in the form of a matrix.
        """
        if self.__Matrix is not None:
            # Already a matrix: count the call and return it in a series
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    # Evaluate the method at each given operating point
                    mValue.append( self.__Method(((_vfmf, None),)) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]

        # (fragment of the shape() method)
        """
        Returns the shape, in numpy form, if the operator is available in
        the form of a matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Returns the numbers of evaluations of the operator: first the four
        per-instance counts (total, matrix form, method form, cache hits),
        then the same four class-wide counts.
        """
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Accounts for one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Accounts for one (or nb) call(s)"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Accounts for one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
424 # ==============================================================================
class FullOperator(object):
    """
    General interface class of full operator type
    (Direct, Linear Tangent, Adjoint)
    """
    # NOTE(review): incomplete view — the __slots__/__init__ headers, some
    # statements and the __repr__/__str__ headers are elided between the
    # lines reproduced below.
    "__name", "__check", "__extraArgs", "__FO", "__T",
    # Fragment of the __init__ signature (constructor defaults):
    name = "GenericFullOperator",
    asOneFunction = None, # 1 Function
    asThreeFunctions = None, # 3 Functions in a dictionary
    asScript = None, # 1 or 3 Function(s) by script
    asDict = None, # Parameters
    extraArguments = None,
    performancePrf = None,
    inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        # Merge the user parameters into the working dictionary
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        # Case 1: the operator is given through a script
        if asScript is not None:
            __Matrix, __Function = None, None
                # (fragment) matrix read from the script
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                # (fragment) a single Direct function read from the script;
                # derivatives will then be approximated
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # (fragment) the three functions read from the script
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Case 2: the operator is given directly as function(s) or a matrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    # A bare callable: wrap it; derivatives will be approximated
                    __Function = { "Direct":asOneFunction }
                    __Function.update({"useApproximatedDerivatives":True})
                    __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                        "The functions has to be given in a dictionnary which have either"+\
                        " 1 key (\"Direct\") or"+\
                        " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the Tangent function as Direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Normalize the optional precomputed "applied in X" values
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Build the three elementary operators from what was collected above
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Approximated derivatives: fill in the missing FD settings
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy = __Function["withLengthOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator(
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Exact derivatives: the three user functions are used directly
            self.__FO["Direct"] = Operator(
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator given as a matrix: the tangent is the matrix
            # itself and the adjoint is its transpose
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            # (fragment of the error raised when nothing usable was given)
            "The %s object is improperly defined or undefined,"%self.__name+\
            " it requires at minima either a matrix, a Direct operator for"+\
            " approximate derivatives or a Tangent/Adjoint operators pair."+\
            " Please check your operator input.")
        # Store the (optional) precomputed values as column vectors
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    # Vectors may be given as strings: convert them first
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
            self.__FO["AppliedInX"] = None

        # (fragments of __repr__ and __str__)
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        "x.__str__() <==> str(x)"
        return str(self.__FO)
664 # ==============================================================================
665 class Algorithm(object):
667 Classe générale d'interface de type algorithme
669 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
670 d'assimilation, en fournissant un container (dictionnaire) de variables
671 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
673 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
676 "_name", "_parameters", "__internal_state", "__required_parameters",
677 "_m", "__variable_names_not_public", "__canonical_parameter_name",
678 "__canonical_stored_name", "__replace_by_the_new_name",
    def __init__(self, name):
        """
        The present initialization makes it possible to build storage
        variables generically available in the elementary algorithms. These
        storage variables are then kept in a dictionary internal to the
        object, which is accessed through the "get" method.

        The expected variables are:
        - APosterioriCorrelations : correlation matrix of the A matrix
        - APosterioriCovariance : a posteriori covariance matrix : A
        - APosterioriStandardDeviations : vector of standard deviations of the A matrix
        - APosterioriVariances : vector of variances of the A matrix
        - Analysis : analysis vector : Xa
        - BMA : Background minus Analysis : Xa - Xb
        - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
        - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
        - CostFunctionJb : background part of the cost function : Jb
        - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
        - CostFunctionJo : observation part of the cost function : Jo
        - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
        - CurrentIterationNumber : current iteration number in iterative algorithms, starting from 0
        - CurrentOptimum : current optimal state during iterations
        - CurrentState : current state during iterations
        - CurrentStepNumber : current step number in evolution algorithms, starting from 0
        - EnsembleOfSimulations : set of states (outputs, simulations) stored column-wise in a matrix
        - EnsembleOfSnapshots : set of states stored column-wise in a matrix
        - EnsembleOfStates : set of states (inputs, parameters) stored column-wise in a matrix
        - ForecastCovariance : covariance of the current forecast state during iterations
        - ForecastState : current forecast state during iterations
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part of the cost function
        - GradientOfCostFunctionJo : gradient of the observation part of the cost function
        - IndexOfOptimum : index of the current optimal state during iterations
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentAnalysis : the innovation at the analysed state : da = Y - H(Xa)
        - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
        - InternalCostFunctionJ : set of internal values of the cost function J in a vector
        - InternalCostFunctionJb : set of internal values of the cost function Jb in a vector
        - InternalCostFunctionJo : set of internal values of the cost function Jo in a vector
        - InternalStates : set of internal states stored column-wise in a matrix (=EnsembleOfSnapshots)
        - JacobianMatrixAtBackground : Jacobian matrix at the background state
        - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
        - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
        - KalmanGainAtOptimum : Kalman gain at the optimum
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - ReducedCoordinates : coordinates in the reduced basis
        - Residu : in the case of checking algorithms
        - SampledStateForQuantiles : state samples for quantile estimation
        - SigmaBck2 : indicator of optimal correction of background errors
        - SigmaObs2 : indicator of optimal correction of observation errors
        - SimulatedObservationAtBackground : the observed state H(Xb) at the background
        - SimulatedObservationAtCurrentOptimum : the observed state H(X) at the current optimal state
        - SimulatedObservationAtCurrentState : the observed state H(X) at the current state
        - SimulatedObservationAtOptimum : the observed state H(Xa) at the optimum
        - SimulationQuantiles : observed states H(X) for the requested quantiles
        One can add variables to be stored in the initialization of the
        elementary algorithm that will inherit from this class
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__internal_state = {}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # "lower" -> "correct" mapping
        self.__canonical_stored_name = {} # "lower" -> "correct" mapping
        self.__replace_by_the_new_name = {} # New name from an old name
        # Registry of all the storable variables, each as a Persistence object
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
        self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
        self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
        self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
        self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
        self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
        self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
        self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
        self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
        self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
        self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        # Build the case-insensitive lookup tables for stored and parameter names
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        # Shared pre-processing executed before every algorithm run: parameter
        # storage, presence checks for vectors/covariances/operators, bounds
        # and initial point normalisation, then verbosity configuration.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        #
        # Update of the internal parameters with the content of Parameters,
        # taking the default values for all those left undefined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        #
        # Checks and completions of the input vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Check one input vector against the mandatory/optional requirements
            if symbol is None: symbol = variable
                # Missing mandatory vector: fail early with an explicit message
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                        "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
                        self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        #
        # Checks and completions of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Check one error covariance matrix against the requirements
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        #
        # Checks and completions of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Check one operator against the requirements; an empty dict counts as unset
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        #
        # Checks and completions of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) \
            and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
            and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Caution: unlike Bounds, no default to None here, otherwise it would be impossible to run without bounds
        #
        # Checks and completions of the initialisation point X
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                # Obtained by typecast: numpy.ravel(self._parameters["InitializationPoint"])
                self._parameters["InitializationPoint"] = numpy.ravel(Xb)
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")
        #
        # Workaround for a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        #
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    def _post_run(self,_oH=None):
        # Shared post-processing: derive variance/standard-deviation/correlation
        # diagnostics from each stored a-posteriori covariance, then log the
        # operator call counters and the memory/time usage.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalise the covariance by the standard deviations to obtain correlations
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
                "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
                "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
967 def _toStore(self, key):
968 "True if in StoreSupplementaryCalculations, else False"
969 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given.
        The variables are returned directly as objects, so the access
        methods on an individual object are those of the persistence
        classes.
        """
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # A None or unknown key is simply reported as absent
        if key is None or key.lower() not in self.__canonical_stored_name:
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        "D.keys() -> list of D's keys"
        # Delegate to the StoredVariables dictionary when it exists
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Known stored variable: remove it and return its value (or d as default)
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
        Must implement the elementary algorithmic calculation operation.
        raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
    def defineRequiredParameter(self,
        Allows the definition, in the algorithm, of the required parameters
        and of their default characteristics.
            raise ValueError("A name is mandatory to define a required parameter.")
        #
        # Record the parameter description for later controlled evaluation
        self.__required_parameters[name] = {
            "default" : default,
            "typecast" : typecast,
            "listval" : listval,
            "listadv" : listadv,
            "message" : message,
            "oldname" : oldname,
        self.__canonical_parameter_name[name.lower()] = name
        # A deprecated old name still resolves to the new canonical one
        if oldname is not None:
            self.__canonical_parameter_name[oldname.lower()] = name # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of the names of the required parameters, or
        directly the dictionary of the required parameters details.
        """
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        Return the value of a required parameter in a controlled way
        # Resolve the canonical name and the declared constraints
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # Choose the effective value: given value, or declared default
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Enforce the declared numeric bounds and the allowed-value lists
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    if listval is not None and v in listval: continue
                    elif listadv is not None and v in listadv: continue
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        Allows imposing the calculation arguments that are required as input.
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        Return the lists of the calculation arguments required as input.
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=()):
        Allows adding attributes such as classification tags.
        Returns the current list in all cases.
        self.__required_inputs["ClassificationTags"].extend( tags )
        return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        Store the received parameters in the internal dictionary.
        self._parameters.update( fromDico )
        # Map each canonical parameter name back to the user-supplied key
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # Warn about deprecated (old) parameter names still in use
        for k in __inverse_fromDico_keys.values():
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
                __msg += " Please update your code."
                warnings.warn(__msg, FutureWarning, stacklevel=50)
        # Evaluate each required parameter from the user value or its default
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            # Avoid dumping very long parameter values in the debug log
            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
        Allows storing named variables constituting the internal state
        if reset: # Empty the dictionary beforehand
            self.__internal_state = {}
        if key is not None and value is not None:
            self.__internal_state[key] = value
        self.__internal_state.update( dict(fromDico) )
    def _getInternalState(self, key=None):
        Return an internal state as a dictionary of named variables
        # A known key returns the single value, otherwise the whole state
        if key is not None and key in self.__internal_state:
            return self.__internal_state[key]
            return self.__internal_state
    def _getTimeState(self, reset=False):
        Initialise or return the computation time (cpu/elapsed) in seconds
            # Reset: record the reference instants for later measurements
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            # Measure: durations accumulated since the last reset
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # Compare the current cpu/elapsed durations with the optional limits
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
1202 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class mimicking "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to the ones of the "Algorithm" class.
    """
        "_name", "_parameters", "StoredVariables", "__canonical_stored_name",

    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        # Minimal set of stored variables needed by the elementary algorithms
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        # Case-insensitive lookup table for the stored variable names
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given.
        The variables are returned directly as objects, so the access
        methods on an individual object are those of the persistence
        classes.
        """
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
1246 # ==============================================================================
1247 class AlgorithmAndParameters(object):
1249 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1252 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1253 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1254 "__Q", "__variable_names_not_public",
                 name = "GenericAlgorithm",
        # Internal containers: algorithm object, its source module and its name
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        # The algorithm choice can come from a script or be given directly
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        "Update of the parameters"
        # The parameter dictionary can come from a script or be given directly
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Allows launching the assimilation calculation"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input through its getO accessor when available,
        # otherwise keep the raw object as provided
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        # Check size compatibility before launching the algorithm
        self.__shape_validate()
        #
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Allows launching the assimilation calculation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        # A full SALOME/YACS/ADAO environment is required for this execution mode
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        # NOTE(review): "pilot" and "loader" are YACS modules imported in
        # nearby lines not visible here — confirm against the full file
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parsing and validity problems instead of executing
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # Lookup order: algorithm stored variables, then parameters,
        # otherwise all public variables
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1406 def pop(self, k, d):
1407 "Necessaire pour le pickling"
1408 return self.__algorithm.pop(k, d)
1410 def getAlgorithmRequiredParameters(self, noDetails=True):
1411 "Renvoie la liste des paramètres requis selon l'algorithme"
1412 return self.__algorithm.getRequiredParameters(noDetails)
1414 def getAlgorithmInputArguments(self):
1415 "Renvoie la liste des entrées requises selon l'algorithme"
1416 return self.__algorithm.getInputArguments()
1418 def getAlgorithmAttributes(self):
1419 "Renvoie la liste des attributs selon l'algorithme"
1420 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach an observer __O (with hook parameters __I and scheduler __S)
        # to the stored variable named __V; requires a chosen algorithm
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer __O from the stored variable named __V
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        # Tell whether the stored variable named __V carries a data observer
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # Public keys: algorithm variables plus parameters, minus private names
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1464 def __contains__(self, key=None):
1465 "D.__contains__(k) -> True if D has a key k, else False"
1466 return key in self.__algorithm or key in self.__P
        "x.__repr__() <==> repr(x)"
        # Textual form: algorithm name followed by the parameter dictionary
        return repr(self.__A)+", "+repr(self.__P)
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to use to carry out the assimilation study.
        The argument is a character string referring to the name of an
        algorithm performing the operation on the fixed arguments.
        """
            raise ValueError("Error: algorithm choice has to be given")
        # The algorithm choice is definitive for the lifetime of the object
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
                "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
            # Temporarily prepend the module directory to the import path
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
                "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type of the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1516 def __shape_validate(self):
1518 Validation de la correspondance correcte des tailles des variables et
1519 des matrices s'il y en a.
1521 if self.__Xb is None: __Xb_shape = (0,)
1522 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1523 elif hasattr(self.__Xb,"shape"):
1524 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1525 else: __Xb_shape = self.__Xb.shape()
1526 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1528 if self.__Y is None: __Y_shape = (0,)
1529 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1530 elif hasattr(self.__Y,"shape"):
1531 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1532 else: __Y_shape = self.__Y.shape()
1533 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1535 if self.__U is None: __U_shape = (0,)
1536 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1537 elif hasattr(self.__U,"shape"):
1538 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1539 else: __U_shape = self.__U.shape()
1540 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1542 if self.__B is None: __B_shape = (0,0)
1543 elif hasattr(self.__B,"shape"):
1544 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1545 else: __B_shape = self.__B.shape()
1546 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1548 if self.__R is None: __R_shape = (0,0)
1549 elif hasattr(self.__R,"shape"):
1550 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1551 else: __R_shape = self.__R.shape()
1552 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1554 if self.__Q is None: __Q_shape = (0,0)
1555 elif hasattr(self.__Q,"shape"):
1556 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1557 else: __Q_shape = self.__Q.shape()
1558 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1560 if len(self.__HO) == 0: __HO_shape = (0,0)
1561 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1562 elif hasattr(self.__HO["Direct"],"shape"):
1563 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1564 else: __HO_shape = self.__HO["Direct"].shape()
1565 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1567 if len(self.__EM) == 0: __EM_shape = (0,0)
1568 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1569 elif hasattr(self.__EM["Direct"],"shape"):
1570 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1571 else: __EM_shape = self.__EM["Direct"].shape()
1572 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1574 if len(self.__CM) == 0: __CM_shape = (0,0)
1575 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1576 elif hasattr(self.__CM["Direct"],"shape"):
1577 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1578 else: __CM_shape = self.__CM["Direct"].shape()
1579 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1581 # Vérification des conditions
1582 # ---------------------------
1583 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1584 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1585 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1586 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1588 if not( min(__B_shape) == max(__B_shape) ):
1589 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1590 if not( min(__R_shape) == max(__R_shape) ):
1591 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1592 if not( min(__Q_shape) == max(__Q_shape) ):
1593 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1594 if not( min(__EM_shape) == max(__EM_shape) ):
1595 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1597 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1599 "Shape characteristic of observation operator (H)"+\
1600 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1601 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1603 "Shape characteristic of observation operator (H)"+\
1604 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1605 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1607 "Shape characteristic of observation operator (H)"+\
1608 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1609 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1611 "Shape characteristic of observation operator (H)"+\
1612 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1614 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1615 if self.__algorithmName in ["EnsembleBlue",]:
1616 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1617 self.__Xb = Persistence.OneVector("Background")
1618 for member in asPersistentVector:
1619 self.__Xb.store( numpy.asarray(member, dtype=float) )
1620 __Xb_shape = min(__B_shape)
1623 "Shape characteristic of a priori errors covariance matrix (B)"+\
1624 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1626 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1628 "Shape characteristic of observation errors covariance matrix (R)"+\
1629 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1631 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1633 "Shape characteristic of evolution model (EM)"+\
1634 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1636 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1638 "Shape characteristic of control model (CM)"+\
1639 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1641 if ("Bounds" in self.__P) \
1642 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1643 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1644 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1645 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1647 if ("StateBoundsForQuantiles" in self.__P) \
1648 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1649 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1650 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1651 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1655 # ==============================================================================
# Generic action-interface class for a regulation: keeps a name and a
# parameter dictionary "__P", filled from an explicit dict and/or a user
# script, with the algorithm name stored under the "Algorithm" key.
# NOTE(review): this excerpt is elided (e.g. the "def __init__" header and
# some "else:" branches are missing); comments describe only visible code.
1656 class RegulationAndParameters(object):
1658     Classe générale d'interface d'action pour la régulation et ses paramètres
1660     __slots__ = ("__name", "__P")
1663         name = "GenericRegulation",
1670         self.__name = str(name)
# Prefer the explicitly given algorithm; otherwise read "Algorithm" from the script.
1673         if asAlgorithm is None and asScript is not None:
1674             __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1676             __Algo = asAlgorithm
# Same precedence for the parameters: script used only when no dict is given.
1678         if asDict is None and asScript is not None:
1679             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1683         if __Dict is not None:
1684             self.__P.update( dict(__Dict) )
# The algorithm name is normalized to str and merged into the parameters.
1686         if __Algo is not None:
1687             self.__P.update( {"Algorithm":str(__Algo)} )
1689     def get(self, key = None):
1690         "Vérifie l'existence d'une clé de variable ou de paramètres"
# Visible branch returns the stored value for "key"; default handling for
# key=None is on elided lines — TODO confirm against the full source.
1692             return self.__P[key]
1696 # ==============================================================================
# Generic observer interface: binds an observer callable "__O" to one or
# several algorithm variable names "__V" (with per-variable info strings
# "__I"), then registers it on the algorithm via setObserver.
# NOTE(review): excerpt is elided (e.g. "def __init__", "else:" branches and
# the "__repr__"/"__str__" headers are missing); comments cover visible code.
1697 class DataObserver(object):
1699     Classe générale d'interface de type observer
1701     __slots__ = ("__name", "__V", "__O", "__I")
1704         name = "GenericObserver",
1716         self.__name = str(name)
# Normalize the observed variable(s): a tuple/list of names, or one name.
1721         if onVariable is None:
1722             raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1723         elif type(onVariable) in (tuple, list):
1724             self.__V = tuple(map( str, onVariable ))
1725             if withInfo is None:
# A single info string is replicated for every observed variable.
1728                 self.__I = (str(withInfo),)*len(self.__V)
1729         elif isinstance(onVariable, str):
1730             self.__V = (onVariable,)
1731             if withInfo is None:
# Without explicit info, the variable name itself is used as the info.
1732                 self.__I = (onVariable,)
1734                 self.__I = (str(withInfo),)
1736             raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer is either a ready-made object, or a function built from a
# user-script text through UserScript + Observer2Func.
1738         if asObsObject is not None:
1739             self.__O = asObsObject
1741             __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1742             __Function = Observer2Func(__FunctionText)
1743             self.__O = __Function.getfunc()
# Register the observer on each requested variable of the algorithm.
1745         for k in range(len(self.__V)):
1748             if ename not in withAlgo:
1749                 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1751                 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# Debug/str representations: observed variables then the observer itself.
1754         "x.__repr__() <==> repr(x)"
1755         return repr(self.__V)+"\n"+repr(self.__O)
1758         "x.__str__() <==> str(x)"
1759         return str(self.__V)+"\n"+str(self.__O)
1761 # ==============================================================================
# Generic holder for a user-supplied script text "__F", resolved in priority
# order: explicit string, named template (UserPostAnalysis/Observer), or an
# external script file read through Interfaces.ImportFromScript.
# NOTE(review): excerpt is elided (e.g. "def __init__", the asString branch
# body and "__repr__"/"__str__" headers are missing).
1762 class UserScript(object):
1764     Classe générale d'interface de type texte de script utilisateur
1766     __slots__ = ("__name", "__F")
1769         name = "GenericUserScript",
1776         self.__name = str(name)
1778         if asString is not None:
# Named templates are looked up only for the two known template families.
1780         elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1781             self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1782         elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1783             self.__F = Templates.ObserverTemplates[asTemplate]
1784         elif asScript is not None:
1785             self.__F = Interfaces.ImportFromScript(asScript).getstring()
# Both representations expose the stored script text.
1790         "x.__repr__() <==> repr(x)"
1791         return repr(self.__F)
1794         "x.__str__() <==> str(x)"
1795         return str(self.__F)
1797 # ==============================================================================
# Generic storage for external parameters: a named dict "__P" with a small
# mapping-like API (get, keys, pop, items, __contains__), updatable from an
# explicit dict or from a script variable "ExternalParameters".
# NOTE(review): excerpt is elided (e.g. "def __init__", "keys"/"items"
# headers are missing); comments describe only visible code.
1798 class ExternalParameters(object):
1800     Classe générale d'interface pour le stockage des paramètres externes
1802     __slots__ = ("__name", "__P")
1805         name = "GenericExternalParameters",
1811         self.__name = str(name)
1814         self.updateParameters( asDict, asScript )
1816     def updateParameters(self,
1820         "Mise à jour des paramètres"
# Script is used only when no explicit dict is provided.
1821         if asDict is None and asScript is not None:
1822             __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1826         if __Dict is not None:
1827             self.__P.update( dict(__Dict) )
1829     def get(self, key = None):
# Visible branches: lookup by key, otherwise the list of stored keys.
1831             return self.__P[key]
1833             return list(self.__P.keys())
1836         return list(self.__P.keys())
1838     def pop(self, k, d):
# Delegates to dict.pop with a default, like the mapping protocol.
1839         return self.__P.pop(k, d)
1842         return self.__P.items()
1844     def __contains__(self, key=None):
1845         "D.__contains__(k) -> True if D has a key k, else False"
1846         return key in self.__P
1848 # ==============================================================================
# Generic state-vector interface: wraps either a single vector (column
# ndarray in "__V") or a series of vectors (Persistence.OneVector), built
# from a direct value, a user script, or a columned data file. Exposes
# "shape"/"size" attributes and type predicates isvector()/isseries().
# NOTE(review): excerpt is elided ("def __init__", several "else:" branches,
# "try:" lines and method headers are missing); comments cover visible code.
1849 class State(object):
1851     Classe générale d'interface de type état
1854     "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1859         name = "GenericVector",
1861         asPersistentVector = None,
1867         toBeChecked = False,
1870         Permet de définir un vecteur :
1871         - asVector : entrée des données, comme un vecteur compatible avec le
1872         constructeur de numpy.matrix, ou "True" si entrée par script.
1873         - asPersistentVector : entrée des données, comme une série de vecteurs
1874         compatible avec le constructeur de numpy.matrix, ou comme un objet de
1875         type Persistence, ou "True" si entrée par script.
1876         - asScript : si un script valide est donné contenant une variable
1877         nommée "name", la variable est de type "asVector" (par défaut) ou
1878         "asPersistentVector" selon que l'une de ces variables est placée à
1880         - asDataFile : si un ou plusieurs fichiers valides sont donnés
1881         contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1882         (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1883         nommée "name"), on récupère les colonnes et on les range ligne après
1884         ligne (colMajor=False, par défaut) ou colonne après colonne
1885         (colMajor=True). La variable résultante est de type "asVector" (par
1886         défaut) ou "asPersistentVector" selon que l'une de ces variables est
1889         self.__name = str(name)
1890         self.__check = bool(toBeChecked)
1894         self.__is_vector = False
1895         self.__is_series = False
# Input resolution: script source first, then data file, then direct values.
1897         if asScript is not None:
1898             __Vector, __Series = None, None
1899             if asPersistentVector:
1900                 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1902                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1903         elif asDataFile is not None:
1904             __Vector, __Series = None, None
1905             if asPersistentVector:
1906                 if colNames is not None:
1907                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1909                     __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files already hold data in the requested orientation, so the series is
# transposed exactly when colMajor disagrees with the npz format.
1910                 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1911                     __Series = numpy.transpose(__Series)
1912                 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1913                     __Series = numpy.transpose(__Series)
1915                 if colNames is not None:
1916                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1918                     __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten in Fortran or C order depending on an elided condition (colMajor,
# presumably — TODO confirm against the full source).
1920                     __Vector = numpy.ravel(__Vector, order = "F")
1922                     __Vector = numpy.ravel(__Vector, order = "C")
1924             __Vector, __Series = asVector, asPersistentVector
# Single-vector case: store as a (n,1) float column array.
1926         if __Vector is not None:
1927             self.__is_vector = True
1928             if isinstance(__Vector, str):
1929                 __Vector = PlatformInfo.strvect2liststr( __Vector )
1930             self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1931             self.shape = self.__V.shape
1932             self.size = self.__V.size
# Series case: store members one by one in a Persistence.OneVector.
1933         elif __Series is not None:
1934             self.__is_series = True
1935             if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1936                 self.__V = Persistence.OneVector(self.__name)
1937                 if isinstance(__Series, str):
1938                     __Series = PlatformInfo.strmatrix2liststr(__Series)
1939                 for member in __Series:
1940                     if isinstance(member, str):
1941                         member = PlatformInfo.strvect2liststr( member )
1942                     self.__V.store(numpy.asarray( member, dtype=float ))
# "shape" may be an attribute or a callable on the stored object.
1945             if isinstance(self.__V.shape, (tuple, list)):
1946                 self.shape = self.__V.shape
1948                 self.shape = self.__V.shape()
1949             if len(self.shape) == 1:
1950                 self.shape = (self.shape[0],1)
1951             self.size = self.shape[0] * self.shape[1]
1954                 "The %s object is improperly defined or undefined,"%self.__name+\
1955                 " it requires at minima either a vector, a list/tuple of"+\
1956                 " vectors or a persistent object. Please check your vector input.")
# Optional time scheduling attached to the state.
1958         if scheduledBy is not None:
1959             self.__T = scheduledBy
1961     def getO(self, withScheduler=False):
# Returns the stored value, optionally together with its scheduler "__T".
1963             return self.__V, self.__T
1964         elif self.__T is None:
# Internal-type predicates used by callers to branch on vector vs series.
1970         "Vérification du type interne"
1971         return self.__is_vector
1974         "Vérification du type interne"
1975         return self.__is_series
1978         "x.__repr__() <==> repr(x)"
1979         return repr(self.__V)
1982         "x.__str__() <==> str(x)"
1983         return str(self.__V)
1985 # ==============================================================================
# Generic covariance-matrix interface. The underlying data "__C" can be one
# of four internal kinds, each with its own fast path in every operation:
#   scalar  : a single positive variance multiplying the identity,
#   vector  : the diagonal of a diagonal covariance,
#   matrix  : a full numpy.matrix,
#   object  : a user object implementing the matrix protocol listed below.
# "shape"/"size" always describe the equivalent full square matrix.
# NOTE(review): excerpt is elided (missing "def" headers, "else:"/"try:"
# lines and branch bodies throughout); comments cover only visible code.
1986 class Covariance(object):
1988     Classe générale d'interface de type covariance
1991         "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
1992         "__is_object", "shape", "size",
1996         name = "GenericCovariance",
1997         asCovariance = None,
1998         asEyeByScalar = None,
1999         asEyeByVector = None,
2002         toBeChecked = False,
2005         Permet de définir une covariance :
2006         - asCovariance : entrée des données, comme une matrice compatible avec
2007         le constructeur de numpy.matrix
2008         - asEyeByScalar : entrée des données comme un seul scalaire de variance,
2009         multiplicatif d'une matrice de corrélation identité, aucune matrice
2010         n'étant donc explicitement à donner
2011         - asEyeByVector : entrée des données comme un seul vecteur de variance,
2012         à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
2013         n'étant donc explicitement à donner
2014         - asCovObject : entrée des données comme un objet python, qui a les
2015         methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
2016         "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
2017         "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2018         - toBeChecked : booléen indiquant si le caractère SDP de la matrice
2019         pleine doit être vérifié
2021         self.__name = str(name)
2022         self.__check = bool(toBeChecked)
2025         self.__is_scalar = False
2026         self.__is_vector = False
2027         self.__is_matrix = False
2028         self.__is_object = False
# When a script is given, the value is read from it under the object's name
# (the elided conditions presumably select which of the four kinds — TODO
# confirm against the full source); otherwise direct arguments are used.
2030         if asScript is not None:
2031             __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2033                 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2035                 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2037                 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2039                 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2041             __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar kind: a single absolute variance value.
2043         if __Scalar is not None:
2044             if isinstance(__Scalar, str):
2045                 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2046                 if len(__Scalar) > 0: __Scalar = __Scalar[0]
2047             if numpy.array(__Scalar).size != 1:
2049                     " The diagonal multiplier given to define a sparse matrix is"+\
2050                     " not a unique scalar value.\n Its actual measured size is"+\
2051                     " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2052             self.__is_scalar = True
2053             self.__C = numpy.abs( float(__Scalar) )
# Vector kind: the (absolute) diagonal; shape/size reflect the full matrix.
2056         elif __Vector is not None:
2057             if isinstance(__Vector, str):
2058                 __Vector = PlatformInfo.strvect2liststr( __Vector )
2059             self.__is_vector = True
2060             self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2061             self.shape = (self.__C.size,self.__C.size)
2062             self.size = self.__C.size**2
# Matrix kind: stored as a full numpy.matrix of floats.
2063         elif __Matrix is not None:
2064             self.__is_matrix = True
2065             self.__C = numpy.matrix( __Matrix, float )
2066             self.shape = self.__C.shape
2067             self.size = self.__C.size
# Object kind: the user object must provide the whole matrix protocol.
2068         elif __Object is not None:
2069             self.__is_object = True
2071             for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2072                 if not hasattr(self.__C,at):
2073                     raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2074             if hasattr(self.__C,"shape"):
2075                 self.shape = self.__C.shape
2078             if hasattr(self.__C,"size"):
2079                 self.size = self.__C.size
# Validation: squareness and (conditionally) symmetric positive-definiteness.
2087     def __validate(self):
2089         if self.__C is None:
2090             raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2091         if self.ismatrix() and min(self.shape) != max(self.shape):
2092             raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2093         if self.isobject() and min(self.shape) != max(self.shape):
2094             raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2095         if self.isscalar() and self.__C <= 0:
2096             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2097         if self.isvector() and (self.__C <= 0).any():
2098             raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# The SPD check (via Cholesky) runs when explicitly requested or when the
# logging level is more verbose than WARNING.
2099         if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2101                 numpy.linalg.cholesky( self.__C )
2103                 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2104         if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2108                 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-kind predicates.
2111         "Vérification du type interne"
2112         return self.__is_scalar
2115         "Vérification du type interne"
2116         return self.__is_vector
2119         "Vérification du type interne"
2120         return self.__is_matrix
2123         "Vérification du type interne"
2124         return self.__is_object
# Inverse: each kind builds a new Covariance of the same kind ("<name>I").
2129             return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2130         elif self.isvector():
2131             return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2132         elif self.isscalar():
2133             return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2134         elif self.isobject() and hasattr(self.__C,"getI"):
2135             return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2137             return None # Indispensable
# Transpose ("<name>T"): identity for diagonal/scalar kinds.
2142             return Covariance(self.__name+"T", asCovariance = self.__C.T )
2143         elif self.isvector():
2144             return Covariance(self.__name+"T", asEyeByVector = self.__C )
2145         elif self.isscalar():
2146             return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2147         elif self.isobject() and hasattr(self.__C,"getT"):
2148             return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2150             raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# Cholesky factor ("<name>C"): sqrt on diagonal/scalar kinds.
2153         "Décomposition de Cholesky"
2155             return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2156         elif self.isvector():
2157             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2158         elif self.isscalar():
2159             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2160         elif self.isobject() and hasattr(self.__C,"cholesky"):
2161             return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2163             raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# Inverse of the Cholesky factor ("<name>H").
2165     def choleskyI(self):
2166         "Inversion de la décomposition de Cholesky"
2168             return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2169         elif self.isvector():
2170             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2171         elif self.isscalar():
2172             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2173         elif self.isobject() and hasattr(self.__C,"choleskyI"):
2174             return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2176             raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# Matrix square root via scipy.linalg.sqrtm (real part kept).
2179         "Racine carrée matricielle"
2182             return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2183         elif self.isvector():
2184             return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2185         elif self.isscalar():
2186             return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2187         elif self.isobject() and hasattr(self.__C,"sqrtm"):
2188             return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2190             raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# Inverse of the matrix square root.
2193         "Inversion de la racine carrée matricielle"
2196             return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2197         elif self.isvector():
2198             return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2199         elif self.isscalar():
2200             return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2201         elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2202             return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2204             raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# Diagonal as a 1-D array; scalar kind needs the target size "msize".
2206     def diag(self, msize=None):
2207         "Diagonale de la matrice"
2209             return numpy.diag(self.__C)
2210         elif self.isvector():
2212         elif self.isscalar():
2214                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2216                 return self.__C * numpy.ones(int(msize))
2217         elif self.isobject() and hasattr(self.__C,"diag"):
2218             return self.__C.diag()
2220             raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# Trace; scalar kind again needs "msize" to know the matrix dimension.
2222     def trace(self, msize=None):
2223         "Trace de la matrice"
2225             return numpy.trace(self.__C)
2226         elif self.isvector():
2227             return float(numpy.sum(self.__C))
2228         elif self.isscalar():
2230                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2232                 return self.__C * int(msize)
2233         elif self.isobject():
2234             return self.__C.trace()
2236             raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# Densification to a full float ndarray, expanding diagonal/scalar kinds.
2238     def asfullmatrix(self, msize=None):
2241             return numpy.asarray(self.__C, dtype=float)
2242         elif self.isvector():
2243             return numpy.asarray( numpy.diag(self.__C), dtype=float )
2244         elif self.isscalar():
2246                 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2248                 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2249         elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2250             return self.__C.asfullmatrix()
2252             raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2254     def assparsematrix(self):
2262         "x.__repr__() <==> repr(x)"
2263         return repr(self.__C)
2266         "x.__str__() <==> str(x)"
2267         return str(self.__C)
# Addition: full kinds add densely; diagonal/scalar kinds add in place onto
# the (strided) diagonal of a copy of "other".
2269     def __add__(self, other):
2270         "x.__add__(y) <==> x+y"
2271         if self.ismatrix() or self.isobject():
2272             return self.__C + numpy.asmatrix(other)
2273         elif self.isvector() or self.isscalar():
2274             _A = numpy.asarray(other)
2275             if len(_A.shape) == 1:
# NOTE(review): this 1-D branch uses stride [::2], unlike the analogous
# [::_A.shape[1]+1] diagonal stride in __sub__ below — looks suspicious,
# but surrounding lines are elided; confirm against the full source.
2276                 _A.reshape((-1,1))[::2] += self.__C
2278                 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2279             return numpy.asmatrix(_A)
2281     def __radd__(self, other):
2282         "x.__radd__(y) <==> y+x"
2283         raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2285     def __sub__(self, other):
2286         "x.__sub__(y) <==> x-y"
2287         if self.ismatrix() or self.isobject():
2288             return self.__C - numpy.asmatrix(other)
2289         elif self.isvector() or self.isscalar():
2290             _A = numpy.asarray(other)
# The flat [::n+1] stride addresses exactly the diagonal of a square matrix.
2291             _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2292             return numpy.asmatrix(_A)
2294     def __rsub__(self, other):
2295         "x.__rsub__(y) <==> y-x"
2296         raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2299         "x.__neg__() <==> -x"
# Matrix product "@": vector operands return 1-D results, matrix operands
# return 2-D; diagonal/scalar kinds use elementwise broadcasting shortcuts.
2302     def __matmul__(self, other):
2303         "x.__mul__(y) <==> x@y"
2304         if self.ismatrix() and isinstance(other, (int, float)):
2305             return numpy.asarray(self.__C) * other
2306         elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2307             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2308                 return numpy.ravel(self.__C @ numpy.ravel(other))
2309             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2310                 return numpy.asarray(self.__C) @ numpy.asarray(other)
2312                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2313         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2314             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2315                 return numpy.ravel(self.__C) * numpy.ravel(other)
2316             elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2317                 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2319                 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2320         elif self.isscalar() and isinstance(other,numpy.matrix):
2321             return numpy.asarray(self.__C * other)
2322         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2323             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2324                 return self.__C * numpy.ravel(other)
2326                 return self.__C * numpy.asarray(other)
2327         elif self.isobject():
2328             return self.__C.__matmul__(other)
2330             raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# Legacy "*" product: same dispatch as "@" but returns numpy.matrix results.
2332     def __mul__(self, other):
2333         "x.__mul__(y) <==> x*y"
2334         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2335             return self.__C * other
2336         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2337             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2338                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2339             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2340                 return self.__C * numpy.asmatrix(other)
2343                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2344         elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2345             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2346                 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2347             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2348                 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2351                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2352         elif self.isscalar() and isinstance(other,numpy.matrix):
2353             return self.__C * other
2354         elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2355             if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2356                 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2358                 return self.__C * numpy.asmatrix(other)
2359         elif self.isobject():
2360             return self.__C.__mul__(other)
2362             raise NotImplementedError(
2363                 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Reflected matrix product: "other @ C".
2365     def __rmatmul__(self, other):
2366         "x.__rmul__(y) <==> y@x"
2367         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2368             return other * self.__C
2369         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2370             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2371                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2372             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2373                 return numpy.asmatrix(other) * self.__C
2376                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2377         elif self.isvector() and isinstance(other,numpy.matrix):
2378             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2379                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2380             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2381                 return numpy.asmatrix(numpy.array(other) * self.__C)
2384                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2385         elif self.isscalar() and isinstance(other,numpy.matrix):
2386             return other * self.__C
2387         elif self.isobject():
2388             return self.__C.__rmatmul__(other)
2390             raise NotImplementedError(
2391                 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# Reflected legacy product: "other * C".
2393     def __rmul__(self, other):
2394         "x.__rmul__(y) <==> y*x"
2395         if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2396             return other * self.__C
2397         elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2398             if numpy.ravel(other).size == self.shape[1]: # Vecteur
2399                 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2400             elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2401                 return numpy.asmatrix(other) * self.__C
2404                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2405         elif self.isvector() and isinstance(other,numpy.matrix):
2406             if numpy.ravel(other).size == self.shape[0]: # Vecteur
2407                 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2408             elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2409                 return numpy.asmatrix(numpy.array(other) * self.__C)
2412                     "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2413         elif self.isscalar() and isinstance(other,numpy.matrix):
2414             return other * self.__C
2415         elif self.isscalar() and isinstance(other,float):
2416             return other * self.__C
2417         elif self.isobject():
2418             return self.__C.__rmul__(other)
2420             raise NotImplementedError(
2421                 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# len() is the leading dimension of the equivalent full matrix.
2424         "x.__len__() <==> len(x)"
2425         return self.shape[0]
2427 # ==============================================================================
# Builds an observer function from its source text: the text "__corps" is
# stored and a bound callable is handed back (the code executing the text
# and the "getfunc" accessor are on elided lines of this excerpt).
# NOTE(review): __slots__ = ("__corps") is a bare string, not a 1-tuple
# (missing trailing comma); CPython accepts a str as a single slot name, so
# this presumably still works as intended — confirm against the full source.
2428 class Observer2Func(object):
2430     Création d'une fonction d'observateur a partir de son texte
2432     __slots__ = ("__corps")
2434     def __init__(self, corps=""):
2435         self.__corps = corps
2436     def func(self,var,info):
2437         "Fonction d'observation"
2440         "Restitution du pointeur de fonction dans l'objet"
2443 # ==============================================================================
# Records the sequence of commands that built a case ("__logSerie") and can
# replay it through pluggable viewers (dump) and loaders (load), keyed by
# format name; extra viewers/loaders can be registered at construction.
# NOTE(review): excerpt is elided (e.g. the "self.__viewers = {" /
# "self.__loaders = {" opener lines and some "else:" branches are missing).
2444 class CaseLogger(object):
2446     Conservation des commandes de création d'un cas
2449         "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
2453     def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2454         self.__name = str(__name)
2455         self.__objname = str(__objname)
2456         self.__logSerie = []
2457         self.__switchoff = False
# Built-in output formats (viewers) visible in this excerpt:
2459             "TUI" :Interfaces._TUIViewer,
2460             "SCD" :Interfaces._SCDViewer,
2461             "YACS":Interfaces._YACSViewer,
2462             "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2463             "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2464             "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formats (loaders) visible in this excerpt:
2467             "TUI" :Interfaces._TUIViewer,
2468             "COM" :Interfaces._COMViewer,
# User-supplied extensions are merged over the defaults.
2470         if __addViewers is not None:
2471             self.__viewers.update(dict(__addViewers))
2472         if __addLoaders is not None:
2473             self.__loaders.update(dict(__addLoaders))
2475     def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2476         "Enregistrement d'une commande individuelle"
# A command is logged only when complete and when logging is not switched off;
# "self" is removed from the recorded argument keys.
2477         if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2478             if "self" in __keys: __keys.remove("self")
2479             self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2481                 self.__switchoff = True
2483             self.__switchoff = False
2485     def dump(self, __filename=None, __format="TUI", __upa=""):
2486         "Restitution normalisée des commandes"
# Dispatch to the viewer registered for the requested format.
2487         if __format in self.__viewers:
2488             __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2490             raise ValueError("Dumping as \"%s\" is not available"%__format)
2491         return __formater.dump(__filename, __upa)
2493     def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2494         "Chargement normalisé des commandes"
# Dispatch to the loader registered for the requested format.
2495         if __format in self.__loaders:
2496             __formater = self.__loaders[__format]()
2498             raise ValueError("Loading as \"%s\" is not available"%__format)
2499         return __formater.load(__filename, __content, __object)
2501 # ==============================================================================
# NOTE(review): the 'def MultiFonction(__xserie,' header and the trailing
# parameters (presumably '_mpEnabled'/'_mpWorkers') are missing from this
# excerpt; the lines below are the remainder of its signature and body.
        _extraArguments = None,
        _sFunction = lambda x: x,
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the function given in argument, optionally evaluated in
    parallel through a multiprocessing pool.
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        # NOTE(review): "unkown" is a typo ("unknown") in this runtime
        # message; left untouched since it is program output.
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    # Worker-count normalization: non-positive or None requests fall back
    # to the pool default; otherwise the explicit count is used.
    if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
        # NOTE(review): this branch body (presumably '__mpWorkers = None')
        # and the 'else:' line are missing from this excerpt.
            __mpWorkers = int(_mpWorkers)
        # NOTE(review): the try/except guard around the import below, and
        # the multiprocessing/monoprocessing dispatch lines, are missing
        # from this excerpt.
            import multiprocessing
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        # Pool.map preserves input order, so __multiHX is index-aligned
        # with the job series.
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
        # logging.debug("MULTF Internal multiprocessing calculation end")
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        # Sequential fallback: _extraArguments selects how the function is
        # called — bare, with positional extras, or with keyword extras.
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    # logging.debug("MULTF Internal multifonction calculations end")
    # NOTE(review): the final 'return' of the computed series appears to be
    # missing from this excerpt.
2559 # ==============================================================================
# Self-test entry point: prints a banner when the module is run directly
# (any further self-test statements are not visible in this excerpt).
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')