1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
# NOTE(review): this excerpt is elided — docstring delimiters, "__slots__ = ("
# and the "def __init__(self," line are missing between the visible lines.
# Only comments are added here; visible code is left as shown.
class CacheManager(object):
    Classe générale de gestion d'un cache de calculs
        "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy = -1,
        Les caractéristiques de tolérance peuvent être modifiées à la création.
        # Tolerance used by wasCalculatedIn to decide two points are redundant
        self.__tolerBP = float(toleranceInRedundancy)
        # Maximal number of cached entries; a negative value means the cache
        # auto-sizes itself on the first store (see storeValueInX)
        self.__lengthOR = int(lengthOfRedundancy)
        # Initial length is kept so enable() can restore it later
        self.__initlnOR = self.__lengthOR
    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks the existence of a computation matching the given value"
        # Scan the cache backwards (most recent first).  Each cached entry is
        # a tuple (point, value, norm, operator name), as built in
        # storeValueInX.  NOTE(review): the branch bodies ("continue"/"break")
        # and the method's final "return" are elided from this excerpt.
        for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
            # Rejection cascade: non-array input, then operator name, then
            # size, then a cheap first-component test, before paying for the
            # full norm comparison in the last test below
            if not hasattr(xValue, 'size'):
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Cache hit: distance below tolerance scaled by the stored norm
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                __HxV = self.__listOPCV[i][1]
    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx matching the value x"
        # First store ever: auto-size the cache from the state dimension
        if self.__lengthOR < 0:
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator is new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Evict oldest entries (FIFO) once the cache exceeds its bound
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        # Entry layout matches the indices used in wasCalculatedIn
        # NOTE(review): the closing parentheses of this append are elided
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name
        # --- NOTE(review): the four lines below are the tails of disable()
        # and enable(); their "def" lines are elided from this excerpt ---
        # disable(): remember the current length and mark the cache inactive
        self.__initlnOR = self.__lengthOR
        self.__enabled = False
        # enable(): restore the initial length and mark the cache active
        self.__lengthOR = self.__initlnOR
        self.__enabled = True
119 # ==============================================================================
# NOTE(review): elided excerpt — docstring delimiters, part of "__slots__",
# the "def __init__(self," line and the fromMethod/fromMatrix keyword lines
# are missing between the visible lines.  Only comments are added.
class Operator(object):
    Classe générale d'interface de type opérateur simple
        "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
        "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
        "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
                 name = "GenericOperator",
                 avoidingRedundancy = True,
                 reducingMemoryUse = False,
                 inputAsMultiFunction = False,
                 enableMultiProcess = False,
                 extraArguments = None,
        On construit un objet de ce type en fournissant, à l'aide de l'un des
        deux mots-clé, soit une fonction ou un multi-fonction python, soit une
        - name : nom d'opérateur
        - fromMethod : argument de type fonction Python
        - fromMatrix : argument adapté au constructeur numpy.array/matrix
        - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
        - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
        - inputAsMultiFunction : booléen indiquant une fonction explicitement
          définie (ou pas) en multi-fonction
        - extraArguments : arguments supplémentaires passés à la fonction de
          base et ses dérivées (tuple ou dictionnaire)
        self.__name = str(name)
        # Per-instance call counters (matrix form, method form, cache hits)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        # An explicit multi-function is used as-is...
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        # ...whereas a mono-function is wrapped into a multi-function adapter
        elif fromMethod is not None and not self.__inputAsMF:
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # A matrix given as a string is first parsed into a list form
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"
    def disableAvoidingRedundancy(self):
        # Turn the class-shared calculation cache off (docstring line elided)
        Operator.CM.disable()
    def enableAvoidingRedundancy(self):
            # NOTE(review): the enclosing "if self.__avoidRC:" / "else:" lines
            # are elided here; this call is presumably the "else" branch that
            # keeps the cache disabled — confirm against the full source
            Operator.CM.disable()
    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
        argument devant a priori être du bon type.
        - les arguments par série sont :
          - xValue : argument adapté pour appliquer l'opérateur
          - HValue : valeur précalculée de l'opérateur en ce point
          - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
        # NOTE(review): several lines are elided in this excerpt (serie
        # normalization, "else:" branches, _HxValue/_xserie initializations).
        # Only comments are added; visible code is unchanged.
            if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        # Precomputed values are stored straight into the cache, no evaluation
        if _HValue is not None:
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                    Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            # No precomputed values: look each point up in the cache first
            for i, xv in enumerate(_xValue):
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                    __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    # Matrix form applies directly; otherwise the point is
                    # queued for a single batched multi-function call below
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
        # One batched call for every point that missed the cache
        if len(_xserie)>0 and self.__Matrix is None:
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF calculation
                _hserie = self.__Method( _xserie, self.__extraArgs ) # MF calculation
            if not hasattr(_hserie, "pop"):
                    "The user input multi-function doesn't seem to return a"+\
                    " result sequence, behaving like a mono-function. It has"+\
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        # Optionally stack results column-wise into one 2D array
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à des
        paires (xValue, uValue). Cette méthode se contente d'appliquer, son
        argument devant a priori être du bon type. Si la uValue est None,
        on suppose que l'opérateur ne s'applique qu'à xValue.
        - paires : les arguments par paire sont :
          - xValue : argument X adapté pour appliquer l'opérateur
          - uValue : argument U adapté pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        # NOTE(review): some lines (result-list initializations, "else:"
        # branches) are elided from this excerpt; only comments are added.
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        # Matrix form: the control uValue is ignored, X is applied directly
        if self.__Matrix is not None:
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Method form: pass the (X,U) pair when U exists, X alone otherwise
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF calculation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF calculation
        # Optionally stack results column-wise into one 2D array
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Permet de restituer le résultat de l'application de l'opérateur à une
        série d'arguments xValue, sachant que l'opérateur est valable en
        xNominal. Cette méthode se contente d'appliquer, son argument devant a
        priori être du bon type. Si l'opérateur est linéaire car c'est une
        matrice, alors il est valable en tout point nominal et xNominal peut
        être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
        permet d'indiquer que l'argument est multi-paires.
        - paires : les arguments par paire sont :
          - xNominal : série d'arguments permettant de donner le point où
            l'opérateur est construit pour être ensuite appliqué
          - xValue : série d'arguments adaptés pour appliquer l'opérateur
        - argsAsSerie : indique si l'argument est une mono ou multi-valeur
        # NOTE(review): result-list initialization and "else:" lines are
        # elided in this excerpt; only comments are added.
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        # Linear (matrix) operator: the nominal point is irrelevant
        if self.__Matrix is not None:
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Method form: the whole (nominal, value) serie goes in one call
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF calculation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF calculation
        # Optionally stack results column-wise into one 2D array
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]
    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        Permet de renvoyer l'opérateur sous la forme d'une matrice
        # NOTE(review): "else:" / list-initialization lines are elided from
        # this excerpt; only comments are added.
        if self.__Matrix is not None:
            # Already a matrix: wrap it in a one-element serie
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        # The sentinel string marks "no operating point given" (do not use "None")
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue":
                # Serie of operating points: one method evaluation per point
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( self.__Method(((_vfmf, None),)) )
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]
        # NOTE(review): the "def shape(self):" line and docstring delimiters
        # are elided from this excerpt; the raise below is presumably inside
        # an elided "else:" branch — confirm against the full source.
        Renvoie la taille sous forme numpy si l'opérateur est disponible sous
        la forme d'une matrice
        if self.__Matrix is not None:
            return self.__Matrix.shape
            raise ValueError("Matrix form of the operator is not available, nor the shape")
    def nbcalls(self, which=None):
        Renvoie les nombres d'évaluations de l'opérateur
        # NOTE(review): the "__nbcalls = (" opening and its closing ")" are
        # elided.  Tuple layout: instance totals first (all, matrix, method,
        # cached), then the class-wide (all instances) equivalents.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        # which=None returns the whole tuple, otherwise one indexed counter
        if which is None: return __nbcalls
        else: return __nbcalls[which]
409 def __addOneMatrixCall(self):
410 "Comptabilise un appel"
411 self.__NbCallsAsMatrix += 1 # Decompte local
412 Operator.NbCallsAsMatrix += 1 # Decompte global
414 def __addOneMethodCall(self, nb = 1):
415 "Comptabilise un appel"
416 self.__NbCallsAsMethod += nb # Decompte local
417 Operator.NbCallsAsMethod += nb # Decompte global
419 def __addOneCacheCall(self):
420 "Comptabilise un appel"
421 self.__NbCallsOfCached += 1 # Decompte local
422 Operator.NbCallsOfCached += 1 # Decompte global
424 # ==============================================================================
# NOTE(review): elided excerpt — docstring delimiters, part of "__slots__",
# the "def __init__(self," line and several keyword lines (asMatrix,
# appliedInX, scheduledBy, toBeChecked) are missing.  Comments only.
class FullOperator(object):
    Classe générale d'interface de type opérateur complet
    (Direct, Linéaire Tangent, Adjoint)
        "__name", "__check", "__extraArgs", "__FO", "__T",
                 name = "GenericFullOperator",
                 asOneFunction = None, # 1 Function
                 asThreeFunctions = None, # 3 Functions in a dictionary
                 asScript = None, # 1 or 3 Function(s) by script
                 asDict = None, # Parameters
                 extraArguments = None,
                 performancePrf = None,
                 inputAsMF = False, # Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        # Merge user parameters into the working dictionary
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        # Derivatives and evaluation multiprocessing are mutually exclusive
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        # --- Selection of the operator definition source ---
        # NOTE(review): several branch headers ("if asMatrix:",
        # "elif asOneFunction:", "else:", "raise ValueError(") are elided
        # from this excerpt; only comments are added.
        if asScript is not None:
            # Script mode: matrix or function(s) are imported from a file
            __Matrix, __Function = None, None
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Direct-argument mode: one function with approximated derivatives
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Full Tangent/Adjoint pair given, no approximation requested
                if isinstance(asThreeFunctions, dict) and \
                   ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                   ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                   (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                     ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                        "The functions has to be given in a dictionnary which have either"+\
                        " 1 key (\"Direct\") or"+\
                        " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                # Missing Direct falls back on the Tangent function
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Normalize the optional "applied in X" precomputed values to a dict
        # NOTE(review): the "else: __appliedInX = None" lines are elided here
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Case 1: single Direct function with finite-difference derivatives
        if isinstance(__Function, dict) and \
           ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
           ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in every FD-approximation option that the user left unset
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            # Local import to avoid a circular dependency at module load time
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy = __Function["withLengthOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            # The three operators are wrapped from the FD approximation
            self.__FO["Direct"] = Operator(
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        # Case 2: the user supplied explicit Direct/Tangent/Adjoint functions
        elif isinstance(__Function, dict) and \
             ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
             (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator(
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        # Case 3: linear operator given as a matrix (Adjoint is the transpose)
        elif asMatrix is not None:
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            # NOTE(review): the "else: raise ValueError(" lines are elided;
            # the message below is the error for an undefined operator
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a matrix, a Direct operator for"+\
                " approximate derivatives or a Tangent/Adjoint operators pair."+\
                " Please check your operator input.")
        # Store precomputed operator values, each as a column vector
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                # A string vector spec is parsed before the ravel/reshape
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
            self.__FO["AppliedInX"] = None
        # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
        # lines are elided; the four lines below are their bodies
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)
        "x.__str__() <==> str(x)"
        return str(self.__FO)
664 # ==============================================================================
# NOTE(review): elided excerpt — docstring delimiters, part of "__slots__"
# and some statements are missing between the visible lines.  Comments only,
# plus one docstring typo fix (second InternalCostFunctionJb -> ...Jo).
class Algorithm(object):
    Classe générale d'interface de type algorithme
    Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
    d'assimilation, en fournissant un container (dictionnaire) de variables
    persistantes initialisées, et des méthodes d'accès à ces variables stockées.
    Une classe élémentaire d'algorithme doit implémenter la méthode "run".
        "_name", "_parameters", "__internal_state", "__required_parameters",
        "_m", "__variable_names_not_public", "__canonical_parameter_name",
        "__canonical_stored_name", "__replace_by_the_new_name",
    def __init__(self, name):
        L'initialisation présente permet de fabriquer des variables de stockage
        disponibles de manière générique dans les algorithmes élémentaires. Ces
        variables de stockage sont ensuite conservées dans un dictionnaire
        interne à l'objet, mais auquel on accède par la méthode "get".
        Les variables prévues sont :
            - APosterioriCorrelations : matrice de corrélations de la matrice A
            - APosterioriCovariance : matrice de covariances a posteriori : A
            - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
            - APosterioriVariances : vecteur des variances de la matrice A
            - Analysis : vecteur d'analyse : Xa
            - BMA : Background moins Analysis : Xa - Xb
            - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
            - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
            - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
            - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
            - CostFunctionJo : partie observations de la fonction-coût : Jo
            - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
            - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
            - CurrentOptimum : état optimal courant lors d'itérations
            - CurrentState : état courant lors d'itérations
            - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
            - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
            - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
            - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
            - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
            - ForecastState : état prédit courant lors d'itérations
            - GradientOfCostFunctionJ : gradient de la fonction-coût globale
            - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
            - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
            - IndexOfOptimum : index de l'état optimal courant lors d'itérations
            - Innovation : l'innovation : d = Y - H(X)
            - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
            - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
            - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
            - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
            - InternalCostFunctionJo : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
            - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
            - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
            - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
            - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
            - KalmanGainAtOptimum : gain de Kalman à l'optimum
            - MahalanobisConsistency : indicateur de consistance des covariances
            - OMA : Observation moins Analyse : Y - Xa
            - OMB : Observation moins Background : Y - Xb
            - ReducedCoordinates : coordonnées dans la base réduite
            - Residu : dans le cas des algorithmes de vérification
            - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
            - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
            - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
            - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
            - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
            - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
            - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
            - SimulationQuantiles : états observés H(X) pour les quantiles demandés
            - SingularValues : valeurs singulières provenant d'une décomposition SVD
        On peut rajouter des variables à stocker dans l'initialisation de
        l'algorithme élémentaire qui va hériter de cette classe
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__internal_state = {}
        self.__required_parameters = {}
        # NOTE(review): the closing "}" of this dict literal is elided
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {} # Mapping "lower"->"correct"
        self.__replace_by_the_new_name = {} # New name replacing an old one
        # Generic persistent storage containers, one per storable variable
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
        self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
        self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
        self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
        self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
        self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
        self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
        self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
        self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
        self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
        self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
        self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
        self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
        # Build case-insensitive lookup tables for stored/parameter names
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# Prepare an algorithm run: merge user Parameters into self._parameters (with
# defaults), sanity-check the required vectors (Xb/Y/U), error covariances
# (B/R/Q) and operators (HO/EM/CM) against the declared mandatory/optional
# inputs, normalize "Bounds" and "InitializationPoint", then set solver
# verbosity options from the logging level.
# NOTE(review): this listing embeds original line numbers and the numbering
# jumps (826, 830, 836, 840, 845, 847, 852-853, ...) show that several source
# lines (else:/if argument is None: branches, call openers) are missing here.
# Code kept byte-identical; comments only.
825 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
827 logging.debug("%s Lancement", self._name)
828 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
# Reset the CPU/elapsed time counters for this run.
829 self._getTimeState(reset=True)
831 # Update the internal parameters with the content of Parameters, taking
832 # the default values for all those that are not defined
833 self.__setParameters(Parameters, reset=True)
# Inject the non-public internal variables only when the user did not set them.
834 for k, v in self.__variable_names_not_public.items():
835 if k not in self._parameters: self.__setParameters( {k:v} )
837 # Corrections and completions of the vectors
# Local checker: raise for a missing mandatory vector, otherwise only log.
# NOTE(review): the leading "if argument is None:" guard (original line 840)
# is missing from this listing — to confirm against the reference source.
838 def __test_vvalue(argument, variable, argname, symbol=None):
839 if symbol is None: symbol = variable
841 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
842 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
843 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
844 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
846 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
848 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
849 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
850 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
851 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
# NOTE(review): the call opener for the next message (original lines 852-853)
# is missing — these two lines are the arguments of a logging call.
854 "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
855 self._name,argname,symbol,numpy.array(argument).size))
857 __test_vvalue( Xb, "Xb", "Background or initial state" )
858 __test_vvalue( Y, "Y", "Observation" )
859 __test_vvalue( U, "U", "Control" )
861 # Corrections and completions of the covariances
# Same checking pattern as __test_vvalue, for error covariance matrices.
862 def __test_cvalue(argument, variable, argname, symbol=None):
863 if symbol is None: symbol = variable
865 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
866 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
867 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
868 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
870 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
872 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
873 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
874 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
875 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
877 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
879 __test_cvalue( B, "B", "Background" )
880 __test_cvalue( R, "R", "Observation" )
881 __test_cvalue( Q, "Q", "Evolution" )
883 # Corrections and completions of the operators
# Operators are dict-like; "unset" means None or an empty dict.
884 def __test_ovalue(argument, variable, argname, symbol=None):
885 if symbol is None: symbol = variable
886 if argument is None or (isinstance(argument,dict) and len(argument)==0):
887 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
888 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
889 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
890 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
892 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
894 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
895 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
896 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
897 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
899 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
901 __test_ovalue( HO, "HO", "Observation", "H" )
902 __test_ovalue( EM, "EM", "Evolution", "M" )
903 __test_ovalue( CM, "CM", "Control Model", "C" )
905 # Corrections and completions of the bounds
906 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
907 logging.debug("%s Bounds taken into account"%(self._name,))
# An empty/invalid Bounds entry is normalized to None.
909 self._parameters["Bounds"] = None
910 if ("StateBoundsForQuantiles" in self._parameters) \
911 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
912 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
913 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
914 # Caution: unlike Bounds, no default to None, otherwise one can not run without bounds
916 # Corrections and completions of the initialization in X
917 if "InitializationPoint" in self._parameters:
919 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
# A user-forced initial point must match the background size exactly.
920 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
921 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
922 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
923 # Obtained by typecast: numpy.ravel(self._parameters["InitializationPoint"])
# Otherwise fall back to the (flattened) background as initial point.
925 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
927 if self._parameters["InitializationPoint"] is None:
928 raise ValueError("Forced initial point can not be set without any given Background or required value")
930 # Correction to work around a TNC bug on the return of the Minimum
931 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
932 self.setParameterValue("StoreInternalVariables",True)
934 # Verbosity and logging
# Below WARNING (i.e. DEBUG/INFO) the wrapped optimizers become verbose too.
935 if logging.getLogger().level < logging.WARNING:
936 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
937 self._parameters["optmessages"] = 15
939 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
940 self._parameters["optmessages"] = 0
# Finalize a run: derive the optional a-posteriori diagnostics (variances,
# standard deviations, correlations) from each stored APosterioriCovariance
# matrix, then log operator call counts and resource usage.
# NOTE(review): embedded numbering jumps (945, 958, 961) show missing lines,
# notably the "logging.debug(" openers for the two message/argument pairs
# below. Code kept byte-identical; comments only.
944 def _post_run(self,_oH=None):
946 if ("StoreSupplementaryCalculations" in self._parameters) and \
947 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
948 for _A in self.StoredVariables["APosterioriCovariance"]:
949 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
# Variances are the diagonal of the covariance matrix.
950 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
951 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
952 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
953 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 * A * D^-1/2 with D = diag(A).
954 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
955 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
956 self.StoredVariables["APosterioriCorrelations"].store( _C )
957 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
# Arguments of a logging.debug call whose opener line is missing above.
959 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
960 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
962 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
963 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
964 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
965 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
966 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    """Tell whether `key` was requested among the supplementary calculations.

    Returns True when `key` appears in the "StoreSupplementaryCalculations"
    entry of the internal parameters, False otherwise.
    """
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
# Accessor: return one stored variable by (case-insensitive) name, or the
# whole StoredVariables mapping when no key is given. The returned objects
# are the Persistence instances themselves.
# NOTE(review): the "if key is not None ... else" guard lines (original
# 980-983) are missing from this listing; code kept byte-identical.
973 def get(self, key=None):
975 Renvoie l'une des variables stockées identifiée par la clé, ou le
976 dictionnaire de l'ensemble des variables disponibles en l'absence de
977 clé. Ce sont directement les variables sous forme objet qui sont
978 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
979 des classes de persistance.
# Lookup goes through the lowercase->canonical name table.
982 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
984 return self.StoredVariables
# Membership test on stored-variable names, case-insensitive.
# NOTE(review): the "return False" / "else:" lines (original 989-990) are
# missing from this listing; code kept byte-identical.
986 def __contains__(self, key=None):
987 "D.__contains__(k) -> True if D has a key k, else False"
988 if key is None or key.lower() not in self.__canonical_stored_name:
991 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# Fragment of the keys() accessor: the "def keys(self):" line (original 993)
# is missing from this listing. Returns the stored-variable names when the
# StoredVariables attribute exists; the fallback branch is also missing.
994 "D.keys() -> list of D's keys"
995 if hasattr(self, "StoredVariables"):
996 return self.StoredVariables.keys()
# Remove and return a stored variable by (case-insensitive) name; `d` is the
# default returned when the canonical key is absent from StoredVariables.
# NOTE(review): intermediate lines (original 1004-1007, presumably the
# fallback branch) are missing; the TypeError line below appears to belong to
# that missing branch. Code kept byte-identical.
1000 def pop(self, k, d):
1001 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
1002 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
1003 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
1008 raise TypeError("pop expected at least 1 arguments, got 0")
1009 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
    """Elementary algorithmic calculation step.

    Abstract entry point: every concrete algorithm class must override this
    method. Calling it on the base class always raises NotImplementedError.
    """
    raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
# Declare a required algorithm parameter and its characteristics (default
# value, typecast, allowed values, deprecation alias...), register its
# canonical name, and log its default.
# NOTE(review): the signature's parameter lines (original 1022-1032), the
# docstring delimiters, the name guard, and the "minval"/"maxval" dict
# entries (1042-1043) are missing from this listing; code kept byte-identical.
1021 def defineRequiredParameter(self,
1033 Permet de définir dans l'algorithme des paramètres requis et leurs
1034 caractéristiques par défaut.
1037 raise ValueError("A name is mandatory to define a required parameter.")
1039 self.__required_parameters[name] = {
1040 "default" : default,
1041 "typecast" : typecast,
1044 "listval" : listval,
1045 "listadv" : listadv,
1046 "message" : message,
1047 "oldname" : oldname,
# Case-insensitive lookup support for the parameter name.
1049 self.__canonical_parameter_name[name.lower()] = name
1050 if oldname is not None:
# A deprecated old name maps to the new canonical name.
1051 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1052 self.__replace_by_the_new_name[oldname.lower()] = name
# setParameterValue(name) with no value returns the (typecast) default.
1053 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# Return either the sorted list of required-parameter names (noDetails=True)
# or the full details dictionary.
# NOTE(review): the "if noDetails:" / "else:" lines (original 1060, 1062)
# are missing from this listing; code kept byte-identical.
1055 def getRequiredParameters(self, noDetails=True):
1057 Renvoie la liste des noms de paramètres requis ou directement le
1058 dictionnaire des paramètres requis.
1061 return sorted(self.__required_parameters.keys())
1063 return self.__required_parameters
# Validate and return the value of a declared parameter: apply the default
# when no value is given, run the typecast, then enforce minval/maxval and
# the allowed-value lists (listval public, listadv advanced). Raises
# ValueError on any violation.
# NOTE(review): several branch lines are missing from this listing
# (original 1078 "__val = None", 1082 "else:", 1084-1085 "try:", 1087
# "except:", 1096/1099 loop lines, 1103-1104 "return __val"); code kept
# byte-identical.
1065 def setParameterValue(self, name=None, value=None):
1067 Renvoie la valeur d'un paramètre requis de manière contrôlée
# Resolve the canonical name case-insensitively, then fetch the declaration.
1069 __k = self.__canonical_parameter_name[name.lower()]
1070 default = self.__required_parameters[__k]["default"]
1071 typecast = self.__required_parameters[__k]["typecast"]
1072 minval = self.__required_parameters[__k]["minval"]
1073 maxval = self.__required_parameters[__k]["maxval"]
1074 listval = self.__required_parameters[__k]["listval"]
1075 listadv = self.__required_parameters[__k]["listadv"]
1077 if value is None and default is None:
1079 elif value is None and default is not None:
1080 if typecast is None: __val = default
1081 else: __val = typecast( default )
1083 if typecast is None: __val = value
1086 __val = typecast( value )
1088 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Range checks use numpy so list/tuple values are checked element-wise.
1090 if minval is not None and (numpy.array(__val, float) < minval).any():
1091 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1092 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1093 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
1094 if listval is not None or listadv is not None:
# Sequence values: every element must be in listval or listadv.
1095 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1097 if listval is not None and v in listval: continue
1098 elif listadv is not None and v in listadv: continue
1100 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1101 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1102 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# Record which calculation inputs are mandatory or optional for this
# algorithm (consumed later by _pre_run's checks).
1106 def requireInputArguments(self, mandatory=(), optional=()):
1108 Permet d'imposer des arguments de calcul requis en entrée.
1110 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1111 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Return the (mandatory, optional) tuples of required input argument names.
1113 def getInputArguments(self):
1115 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1117 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# Append classification tags to the algorithm's attribute list and return
# the (possibly updated) current list in all cases.
1119 def setAttributes(self, tags=()):
1121 Permet d'adjoindre des attributs comme les tags de classification.
1122 Renvoie la liste actuelle dans tous les cas.
1124 self.__required_inputs["ClassificationTags"].extend( tags )
1125 return self.__required_inputs["ClassificationTags"]
# Store incoming parameters in the internal dictionary: map user-supplied
# keys to canonical names (case-insensitive), warn on deprecated old names,
# then (re)validate every declared parameter via setParameterValue and log
# the result.
# NOTE(review): several lines are missing from this listing (original 1128/
# 1130 docstring delimiters, 1138 reset handling, 1145 continuation, 1149
# "else:", 1151-1152); code kept byte-identical.
1127 def __setParameters(self, fromDico={}, reset=False):
1129 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1131 self._parameters.update( fromDico )
# Map canonical-name -> user-supplied key for keys we recognize.
1132 __inverse_fromDico_keys = {}
1133 for k in fromDico.keys():
1134 if k.lower() in self.__canonical_parameter_name:
1135 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1136 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1137 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Emit a FutureWarning for any key that uses a deprecated old name.
1139 for k in __inverse_fromDico_keys.values():
1140 if k.lower() in self.__replace_by_the_new_name:
1141 __newk = self.__replace_by_the_new_name[k.lower()]
1142 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1143 __msg += " Please update your code."
1144 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Validate each declared parameter: user value when given, default otherwise.
1146 for k in self.__required_parameters.keys():
1147 if k in __canonic_fromDico_keys:
1148 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1150 self._parameters[k] = self.setParameterValue(k)
# Long containers are logged by length only to keep the log readable.
1153 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1154 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1156 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
    """Store named variables making up the algorithm's internal state.

    When `reset` is True the state dictionary is emptied first; then the
    optional (key, value) pair is stored, and the whole `fromDico` mapping
    is merged in.
    """
    if reset:
        # Start over from an empty internal-state dictionary.
        self.__internal_state = {}
    if not (key is None or value is None):
        self.__internal_state[key] = value
    self.__internal_state.update(dict(fromDico))
# Return one named internal-state variable, or the whole state dictionary
# when no (known) key is given.
# NOTE(review): the "else:" line (original 1174) and docstring delimiters
# are missing from this listing; code kept byte-identical.
1168 def _getInternalState(self, key=None):
1170 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1172 if key is not None and key in self.__internal_state:
1173 return self.__internal_state[key]
1175 return self.__internal_state
# Time accounting: on reset, record the reference CPU and wall-clock
# instants; otherwise return (cpu, elapsed) seconds since the last reset.
# NOTE(review): the "if reset:" / "else:" lines (original 1181, 1184/1185)
# are missing from this listing; code kept byte-identical.
1177 def _getTimeState(self, reset=False):
1179 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
# Reference instants, measured with process_time (CPU) and perf_counter (wall).
1182 self.__initial_cpu_time = time.process_time()
1183 self.__initial_elapsed_time = time.perf_counter()
1186 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1187 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1188 return self.__cpu_time, self.__elapsed_time
# Stopping criterion on time limits: compare current CPU/elapsed time with
# the MaximumCpuTime / MaximumElapsedTime parameters when present.
# NOTE(review): the final "else:" and the return lines (original 1197,
# 1199-1202, returning __SC or (__SC, __SR) depending on withReason) are
# missing from this listing; code kept byte-identical.
1190 def _StopOnTimeLimit(self, X=None, withReason=False):
1191 "Stop criteria on time limit: True/False [+ Reason]"
1192 c, e = self._getTimeState()
1193 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1194 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1195 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1196 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1198 __SC, __SR = False, ""
1204 # ==============================================================================
# Lightweight stand-in for "Algorithm": same storage interface (StoredVariables,
# _toStore, get) but none of the advanced checking. Used where only storage
# behavior is needed.
# NOTE(review): several lines are missing from this listing (slots tail
# 1213-1214, docstring delimiters, the "if key is not None ... else" guard
# in get, original 1242-1245); code kept byte-identical.
1205 class PartialAlgorithm(object):
1207 Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1208 action avancée comme la vérification . Pour les méthodes reprises ici,
1209 le fonctionnement est identique à celles de la classe "Algorithm".
1212 "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
1215 def __init__(self, name):
1216 self._name = str( name )
# Minimal parameter set: only the supplementary-calculations list.
1217 self._parameters = {"StoreSupplementaryCalculations":[]}
# Reduced set of stored variables compared to the full Algorithm class.
1219 self.StoredVariables = {}
1220 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1221 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1222 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1223 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1224 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1225 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
# Case-insensitive lookup table, as in Algorithm.
1227 self.__canonical_stored_name = {}
1228 for k in self.StoredVariables:
1229 self.__canonical_stored_name[k.lower()] = k
1231 def _toStore(self, key):
1232 "True if in StoreSupplementaryCalculations, else False"
1233 return key in self._parameters["StoreSupplementaryCalculations"]
# Same contract as Algorithm.get: one variable by case-insensitive key, or
# the whole mapping when no key is given.
1235 def get(self, key=None):
1237 Renvoie l'une des variables stockées identifiée par la clé, ou le
1238 dictionnaire de l'ensemble des variables disponibles en l'absence de
1239 clé. Ce sont directement les variables sous forme objet qui sont
1240 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1241 des classes de persistance.
1244 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1246 return self.StoredVariables
1248 # ==============================================================================
1249 class AlgorithmAndParameters(object):
1251 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1254 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1255 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1256 "__Q", "__variable_names_not_public",
# Fragment of AlgorithmAndParameters.__init__: the "def __init__(self," line
# (original 1258-1259) and several body lines (1261-1266, 1268-1270, 1274,
# 1276, 1279, 1281, 1285, 1287, 1289) are missing from this listing.
# Visible behavior: store the case name, initialize the algorithm holders,
# merge parameters, resolve the Algorithm choice (from script or direct
# argument) and instantiate it via __setAlgorithm.
1260 name = "GenericAlgorithm",
1267 self.__name = str(name)
1271 self.__algorithm = {}
1272 self.__algorithmFile = None
1273 self.__algorithmName = None
1275 self.updateParameters( asDict, asScript )
# The algorithm name can come from a script file or be given directly.
1277 if asAlgorithm is None and asScript is not None:
1278 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1280 __Algo = asAlgorithm
1282 if __Algo is not None:
1283 self.__A = str(__Algo)
1284 self.__P.update( {"Algorithm":self.__A} )
1286 self.__setAlgorithm( self.__A )
# Duplicated from Algorithm: internal, non-public variable defaults.
1288 self.__variable_names_not_public = {"nextStep":False} # Duplication in Algorithm
# Merge new parameters into the internal dictionary, reading them from a
# script file when asDict is not given.
# NOTE(review): the signature parameter lines (original 1291-1293) and the
# "else:" branch (1297-1299) are missing from this listing.
1290 def updateParameters(self,
1294 "Parameter update"
1295 if asDict is None and asScript is not None:
1296 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1300 if __Dict is not None:
1301 self.__P.update( dict(__Dict) )
# Launch the assimilation calculation in pure-Python mode: clear the operator
# cache, unwrap every input object from the given dictionary (via getO() when
# available), validate shapes, then run the selected algorithm.
# NOTE(review): lines are missing from this listing (original 1306, 1325,
# 1327, 1329-1337, 1339-1341 — the remaining run() keyword arguments and the
# return); code kept byte-identical.
1303 def executePythonScheme(self, asDictAO = None):
1304 "Permet de lancer le calcul d'assimilation"
# Shared operator cache is reset before each execution.
1305 Operator.CM.clearCache()
1307 if not isinstance(asDictAO, dict):
1308 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each input may be a wrapper exposing getO(); otherwise it is used as-is.
1309 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1310 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1311 else: self.__Xb = None
1312 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1313 else: self.__Y = asDictAO["Observation"]
1314 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1315 else: self.__U = asDictAO["ControlInput"]
1316 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1317 else: self.__HO = asDictAO["ObservationOperator"]
1318 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1319 else: self.__EM = asDictAO["EvolutionModel"]
1320 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1321 else: self.__CM = asDictAO["ControlModel"]
1322 self.__B = asDictAO["BackgroundError"]
1323 self.__R = asDictAO["ObservationError"]
1324 self.__Q = asDictAO["EvolutionError"]
# Consistency of all shapes is checked before running.
1326 self.__shape_validate()
1328 self.__algorithm.run(
1338 Parameters = self.__P,
# Launch the assimilation calculation through a YACS XML scheme inside a
# SALOME session: check the environment, load and validate the scheme, then
# execute it and report errors.
# NOTE(review): many lines are missing from this listing (original 1346,
# 1355-1356, 1358, 1360, 1364, 1367-1370 try:, 1374 sys.exit, 1379-1380,
# 1383, 1389, 1391, 1394-1396); code kept byte-identical. The pilot/loader/
# SALOMERuntime modules are SALOME-provided — presumably imported in the
# missing lines; to confirm against the reference source.
1342 def executeYACSScheme(self, FileName=None):
1343 "Permet de lancer le calcul d'assimilation"
1344 if FileName is None or not os.path.exists(FileName):
1345 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1347 __file = os.path.abspath(FileName)
1348 logging.debug("The YACS file name is \"%s\"."%__file)
# All three SALOME components must be available to run a YACS scheme.
1349 if not PlatformInfo.has_salome or \
1350 not PlatformInfo.has_yacs or \
1351 not PlatformInfo.has_adao:
1352 raise ImportError("\n\n"+\
1353 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1354 "Please load the right environnement before trying to use it.\n")
1357 import SALOMERuntime
1359 SALOMERuntime.RuntimeSALOME_setRuntime()
1361 r = pilot.getRuntime()
1362 xmlLoader = loader.YACSLoader()
1363 xmlLoader.registerProcCataLoader()
# The scheme file is also registered as a catalog before loading.
1365 catalogAd = r.loadCatalog("proc", __file)
1366 r.addCatalog(catalogAd)
1371 p = xmlLoader.load(__file)
1372 except IOError as ex:
1373 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parsing, validity and consistency are each checked and reported in turn.
1375 logger = p.getLogger("parser")
1376 if not logger.isEmpty():
1377 print("The imported YACS XML schema has errors on parsing:")
1378 print(logger.getStr())
1381 print("The YACS XML schema is not valid and will not be executed:")
1382 print(p.getErrorReport())
1384 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1385 p.checkConsistency(info)
1386 if info.areWarningsOrErrors():
1387 print("The YACS XML schema is not coherent and will not be executed:")
1388 print(info.getGlobalRepr())
1390 e = pilot.ExecutorSwig()
1392 if p.getEffectiveState() != pilot.DONE:
1393 print(p.getErrorReport())
# Accessor: look the key up first in the algorithm's stored variables, then
# in the parameter dictionary; with no key, return all parameters minus the
# non-public internal variables.
# NOTE(review): the "else:" lines and the final return of allvariables
# (original 1403, 1406-1407) are missing from this listing.
1397 def get(self, key = None):
1398 "Vérifie l'existence d'une clé de variable ou de paramètres"
1399 if key in self.__algorithm:
1400 return self.__algorithm.get( key )
1401 elif key in self.__P:
1402 return self.__P[key]
# No key: expose the parameters, hiding the non-public internal entries.
1404 allvariables = self.__P
1405 for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    """Remove key `k` from the underlying algorithm storage and return its
    value, or `d` when absent. Needed for pickling support."""
    storage = self.__algorithm
    return storage.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Return the required parameters of the selected algorithm.

    Simply delegates to the algorithm object: names only when `noDetails`
    is True, full details dictionary otherwise.
    """
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    """Return the (mandatory, optional) input arguments required by the
    selected algorithm, as reported by the algorithm object itself."""
    algorithm = self.__algorithm
    return algorithm.getInputArguments()
def getAlgorithmAttributes(self):
    """Return the classification attributes of the selected algorithm.

    Calls the algorithm's setAttributes() with no tags, which returns the
    current attribute list without modifying it.
    """
    algorithm = self.__algorithm
    return algorithm.setAttributes()
# Attach an observer callable __O (with scheduler __S and parameters __I) to
# the stored variable named __V of the current algorithm.
# NOTE(review): lines are missing from this listing (original 1431 —
# presumably a callability check on __O, 1433-1434 and 1436-1437 — the other
# keyword arguments of setDataObserver); code kept byte-identical.
1424 def setObserver(self, __V, __O, __I, __S):
# An algorithm must be instantiated before observers can be attached.
1425 if self.__algorithm is None \
1426 or isinstance(self.__algorithm, dict) \
1427 or not hasattr(self.__algorithm,"StoredVariables"):
1428 raise ValueError("No observer can be build before choosing an algorithm.")
1429 if __V not in self.__algorithm:
1430 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1432 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1435 HookParameters = __I,
# Detach the observer callable __O from the stored variable named __V
# (__A presumably selects removing all observers — to confirm).
# NOTE(review): lines are missing from this listing (original 1445,
# 1447-1450 — the remaining arguments of removeDataObserver); code kept
# byte-identical.
1438 def removeObserver(self, __V, __O, __A = False):
1439 if self.__algorithm is None \
1440 or isinstance(self.__algorithm, dict) \
1441 or not hasattr(self.__algorithm,"StoredVariables"):
1442 raise ValueError("No observer can be removed before choosing an algorithm.")
1443 if __V not in self.__algorithm:
1444 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1446 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# Tell whether the stored variable named __V currently has a data observer.
# NOTE(review): the "return False" lines for the two guard branches
# (original 1455, 1457) are missing from this listing.
1451 def hasObserver(self, __V):
1452 if self.__algorithm is None \
1453 or isinstance(self.__algorithm, dict) \
1454 or not hasattr(self.__algorithm,"StoredVariables"):
1456 if __V not in self.__algorithm:
1458 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Fragment of the keys() accessor: the "def keys(self):" line (original
# 1459-1460) is missing from this listing. Returns the union of algorithm
# variable names and parameter names, minus the non-public internal entries.
1461 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1462 for k in self.__variable_names_not_public:
1463 if k in __allvariables: __allvariables.remove(k)
1464 return __allvariables
def __contains__(self, key=None):
    """D.__contains__(k) -> True if D has a key k, else False."""
    # A key may live either in the algorithm storage or in the parameters.
    if key in self.__algorithm:
        return True
    return key in self.__P
# Fragments of __repr__ and __str__: both "def" lines (original 1470 area's
# 1469/1473-1474) are missing from this listing. Both render the algorithm
# name (__A) and the parameter dictionary (__P).
1471 "x.__repr__() <==> repr(x)"
1472 return repr(self.__A)+", "+repr(self.__P)
1475 "x.__str__() <==> str(x)"
1476 return str(self.__A)+", "+str(self.__P)
# Select and instantiate the elementary algorithm: locate "<choice>.py" under
# a "daAlgorithms" directory on sys.path, import it with sys.path temporarily
# extended, check it defines ElementaryAlgorithm, and instantiate it.
# NOTE(review): lines are missing from this listing (original 1479/1483-1484
# docstring delimiters and the "if choice is None:" guard, 1489, 1492
# module_path init, 1497 raise opener, 1499, 1502 try:, 1510 raise opener,
# 1512, 1516-1517 return); code kept byte-identical.
1478 def __setAlgorithm(self, choice = None ):
1480 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1481 d'assimilation. L'argument est un champ caractère se rapportant au nom
1482 d'un algorithme réalisant l'opération sur les arguments fixes.
1485 raise ValueError("Error: algorithm choice has to be given")
# The algorithm can only be chosen once per object.
1486 if self.__algorithmName is not None:
1487 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1488 daDirectory = "daAlgorithms"
1490 # Explicitly look for the complete file
1491 # ------------------------------------------
1493 for directory in sys.path:
1494 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1495 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1496 if module_path is None:
1498 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1500 # Import the complete file as a module
1501 # ------------------------------------------
# sys.path is temporarily prepended with the module directory, then restored.
1503 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1504 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1505 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1506 raise ImportError("this module does not define a valid elementary algorithm.")
1507 self.__algorithmName = str(choice)
1508 sys.path = sys_path_tmp ; del sys_path_tmp
1509 except ImportError as e:
1511 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1513 # Instantiate an object of the file's elementary type
1514 # -------------------------------------------------
1515 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1518 def __shape_validate(self):
1520 Validation de la correspondance correcte des tailles des variables et
1521 des matrices s'il y en a.
1523 if self.__Xb is None: __Xb_shape = (0,)
1524 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1525 elif hasattr(self.__Xb,"shape"):
1526 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1527 else: __Xb_shape = self.__Xb.shape()
1528 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1530 if self.__Y is None: __Y_shape = (0,)
1531 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1532 elif hasattr(self.__Y,"shape"):
1533 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1534 else: __Y_shape = self.__Y.shape()
1535 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1537 if self.__U is None: __U_shape = (0,)
1538 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1539 elif hasattr(self.__U,"shape"):
1540 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1541 else: __U_shape = self.__U.shape()
1542 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1544 if self.__B is None: __B_shape = (0,0)
1545 elif hasattr(self.__B,"shape"):
1546 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1547 else: __B_shape = self.__B.shape()
1548 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1550 if self.__R is None: __R_shape = (0,0)
1551 elif hasattr(self.__R,"shape"):
1552 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1553 else: __R_shape = self.__R.shape()
1554 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1556 if self.__Q is None: __Q_shape = (0,0)
1557 elif hasattr(self.__Q,"shape"):
1558 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1559 else: __Q_shape = self.__Q.shape()
1560 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1562 if len(self.__HO) == 0: __HO_shape = (0,0)
1563 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1564 elif hasattr(self.__HO["Direct"],"shape"):
1565 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1566 else: __HO_shape = self.__HO["Direct"].shape()
1567 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1569 if len(self.__EM) == 0: __EM_shape = (0,0)
1570 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1571 elif hasattr(self.__EM["Direct"],"shape"):
1572 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1573 else: __EM_shape = self.__EM["Direct"].shape()
1574 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1576 if len(self.__CM) == 0: __CM_shape = (0,0)
1577 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1578 elif hasattr(self.__CM["Direct"],"shape"):
1579 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1580 else: __CM_shape = self.__CM["Direct"].shape()
1581 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1583 # Vérification des conditions
1584 # ---------------------------
1585 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1586 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1587 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1588 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1590 if not( min(__B_shape) == max(__B_shape) ):
1591 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1592 if not( min(__R_shape) == max(__R_shape) ):
1593 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1594 if not( min(__Q_shape) == max(__Q_shape) ):
1595 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1596 if not( min(__EM_shape) == max(__EM_shape) ):
1597 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1599 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1601 "Shape characteristic of observation operator (H)"+\
1602 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1603 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1605 "Shape characteristic of observation operator (H)"+\
1606 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1607 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1609 "Shape characteristic of observation operator (H)"+\
1610 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1611 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1613 "Shape characteristic of observation operator (H)"+\
1614 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1616 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1617 if self.__algorithmName in ["EnsembleBlue",]:
1618 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1619 self.__Xb = Persistence.OneVector("Background")
1620 for member in asPersistentVector:
1621 self.__Xb.store( numpy.asarray(member, dtype=float) )
1622 __Xb_shape = min(__B_shape)
1625 "Shape characteristic of a priori errors covariance matrix (B)"+\
1626 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1628 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1630 "Shape characteristic of observation errors covariance matrix (R)"+\
1631 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1633 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1635 "Shape characteristic of evolution model (EM)"+\
1636 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1638 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1640 "Shape characteristic of control model (CM)"+\
1641 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1643 if ("Bounds" in self.__P) \
1644 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1645 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1646 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1647 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1649 if ("StateBoundsForQuantiles" in self.__P) \
1650 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1651 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1652 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1653 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1657 # ==============================================================================
1658 class RegulationAndParameters(object):
1660 Classe générale d'interface d'action pour la régulation et ses paramètres
1662 __slots__ = ("__name", "__P")
1665 name = "GenericRegulation",
1672 self.__name = str(name)
1675 if asAlgorithm is None and asScript is not None:
1676 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1678 __Algo = asAlgorithm
1680 if asDict is None and asScript is not None:
1681 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1685 if __Dict is not None:
1686 self.__P.update( dict(__Dict) )
1688 if __Algo is not None:
1689 self.__P.update( {"Algorithm":str(__Algo)} )
1691 def get(self, key = None):
1692 "Vérifie l'existence d'une clé de variable ou de paramètres"
1694 return self.__P[key]
1698 # ==============================================================================
1699 class DataObserver(object):
1701 Classe générale d'interface de type observer
1703 __slots__ = ("__name", "__V", "__O", "__I")
1706 name = "GenericObserver",
1718 self.__name = str(name)
1723 if onVariable is None:
1724 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1725 elif type(onVariable) in (tuple, list):
1726 self.__V = tuple(map( str, onVariable ))
1727 if withInfo is None:
1730 self.__I = (str(withInfo),)*len(self.__V)
1731 elif isinstance(onVariable, str):
1732 self.__V = (onVariable,)
1733 if withInfo is None:
1734 self.__I = (onVariable,)
1736 self.__I = (str(withInfo),)
1738 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1740 if asObsObject is not None:
1741 self.__O = asObsObject
1743 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1744 __Function = Observer2Func(__FunctionText)
1745 self.__O = __Function.getfunc()
1747 for k in range(len(self.__V)):
1750 if ename not in withAlgo:
1751 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1753 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1756 "x.__repr__() <==> repr(x)"
1757 return repr(self.__V)+"\n"+repr(self.__O)
1760 "x.__str__() <==> str(x)"
1761 return str(self.__V)+"\n"+str(self.__O)
1763 # ==============================================================================
1764 class UserScript(object):
1766 Classe générale d'interface de type texte de script utilisateur
1768 __slots__ = ("__name", "__F")
1771 name = "GenericUserScript",
1778 self.__name = str(name)
1780 if asString is not None:
1782 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1783 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1784 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1785 self.__F = Templates.ObserverTemplates[asTemplate]
1786 elif asScript is not None:
1787 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1792 "x.__repr__() <==> repr(x)"
1793 return repr(self.__F)
1796 "x.__str__() <==> str(x)"
1797 return str(self.__F)
1799 # ==============================================================================
1800 class ExternalParameters(object):
1802 Classe générale d'interface pour le stockage des paramètres externes
1804 __slots__ = ("__name", "__P")
1807 name = "GenericExternalParameters",
1813 self.__name = str(name)
1816 self.updateParameters( asDict, asScript )
1818 def updateParameters(self,
1822 "Mise à jour des paramètres"
1823 if asDict is None and asScript is not None:
1824 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1828 if __Dict is not None:
1829 self.__P.update( dict(__Dict) )
1831 def get(self, key = None):
1833 return self.__P[key]
1835 return list(self.__P.keys())
1838 return list(self.__P.keys())
1840 def pop(self, k, d):
1841 return self.__P.pop(k, d)
1844 return self.__P.items()
1846 def __contains__(self, key=None):
1847 "D.__contains__(k) -> True if D has a key k, else False"
1848 return key in self.__P
1850 # ==============================================================================
1851 class State(object):
1853 Classe générale d'interface de type état
1856 "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1861 name = "GenericVector",
1863 asPersistentVector = None,
1869 toBeChecked = False,
1872 Permet de définir un vecteur :
1873 - asVector : entrée des données, comme un vecteur compatible avec le
1874 constructeur de numpy.matrix, ou "True" si entrée par script.
1875 - asPersistentVector : entrée des données, comme une série de vecteurs
1876 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1877 type Persistence, ou "True" si entrée par script.
1878 - asScript : si un script valide est donné contenant une variable
1879 nommée "name", la variable est de type "asVector" (par défaut) ou
1880 "asPersistentVector" selon que l'une de ces variables est placée à
1882 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1883 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1884 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1885 nommée "name"), on récupère les colonnes et on les range ligne après
1886 ligne (colMajor=False, par défaut) ou colonne après colonne
1887 (colMajor=True). La variable résultante est de type "asVector" (par
1888 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1891 self.__name = str(name)
1892 self.__check = bool(toBeChecked)
1896 self.__is_vector = False
1897 self.__is_series = False
1899 if asScript is not None:
1900 __Vector, __Series = None, None
1901 if asPersistentVector:
1902 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1904 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1905 elif asDataFile is not None:
1906 __Vector, __Series = None, None
1907 if asPersistentVector:
1908 if colNames is not None:
1909 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1911 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1912 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1913 __Series = numpy.transpose(__Series)
1914 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1915 __Series = numpy.transpose(__Series)
1917 if colNames is not None:
1918 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1920 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1922 __Vector = numpy.ravel(__Vector, order = "F")
1924 __Vector = numpy.ravel(__Vector, order = "C")
1926 __Vector, __Series = asVector, asPersistentVector
1928 if __Vector is not None:
1929 self.__is_vector = True
1930 if isinstance(__Vector, str):
1931 __Vector = PlatformInfo.strvect2liststr( __Vector )
1932 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1933 self.shape = self.__V.shape
1934 self.size = self.__V.size
1935 elif __Series is not None:
1936 self.__is_series = True
1937 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1938 self.__V = Persistence.OneVector(self.__name)
1939 if isinstance(__Series, str):
1940 __Series = PlatformInfo.strmatrix2liststr(__Series)
1941 for member in __Series:
1942 if isinstance(member, str):
1943 member = PlatformInfo.strvect2liststr( member )
1944 self.__V.store(numpy.asarray( member, dtype=float ))
1947 if isinstance(self.__V.shape, (tuple, list)):
1948 self.shape = self.__V.shape
1950 self.shape = self.__V.shape()
1951 if len(self.shape) == 1:
1952 self.shape = (self.shape[0],1)
1953 self.size = self.shape[0] * self.shape[1]
1956 "The %s object is improperly defined or undefined,"%self.__name+\
1957 " it requires at minima either a vector, a list/tuple of"+\
1958 " vectors or a persistent object. Please check your vector input.")
1960 if scheduledBy is not None:
1961 self.__T = scheduledBy
1963 def getO(self, withScheduler=False):
1965 return self.__V, self.__T
1966 elif self.__T is None:
1972 "Vérification du type interne"
1973 return self.__is_vector
1976 "Vérification du type interne"
1977 return self.__is_series
1980 "x.__repr__() <==> repr(x)"
1981 return repr(self.__V)
1984 "x.__str__() <==> str(x)"
1985 return str(self.__V)
1987 # ==============================================================================
1988 class Covariance(object):
1990 Classe générale d'interface de type covariance
1993 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
1994 "__is_object", "shape", "size",
1998 name = "GenericCovariance",
1999 asCovariance = None,
2000 asEyeByScalar = None,
2001 asEyeByVector = None,
2004 toBeChecked = False,
2007 Permet de définir une covariance :
2008 - asCovariance : entrée des données, comme une matrice compatible avec
2009 le constructeur de numpy.matrix
2010 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
2011 multiplicatif d'une matrice de corrélation identité, aucune matrice
2012 n'étant donc explicitement à donner
2013 - asEyeByVector : entrée des données comme un seul vecteur de variance,
2014 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
2015 n'étant donc explicitement à donner
2016 - asCovObject : entrée des données comme un objet python, qui a les
2017 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
2018 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
2019 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2020 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
2021 pleine doit être vérifié
2023 self.__name = str(name)
2024 self.__check = bool(toBeChecked)
2027 self.__is_scalar = False
2028 self.__is_vector = False
2029 self.__is_matrix = False
2030 self.__is_object = False
2032 if asScript is not None:
2033 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2035 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2037 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2039 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2041 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2043 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
2045 if __Scalar is not None:
2046 if isinstance(__Scalar, str):
2047 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2048 if len(__Scalar) > 0: __Scalar = __Scalar[0]
2049 if numpy.array(__Scalar).size != 1:
2051 " The diagonal multiplier given to define a sparse matrix is"+\
2052 " not a unique scalar value.\n Its actual measured size is"+\
2053 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2054 self.__is_scalar = True
2055 self.__C = numpy.abs( float(__Scalar) )
2058 elif __Vector is not None:
2059 if isinstance(__Vector, str):
2060 __Vector = PlatformInfo.strvect2liststr( __Vector )
2061 self.__is_vector = True
2062 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2063 self.shape = (self.__C.size,self.__C.size)
2064 self.size = self.__C.size**2
2065 elif __Matrix is not None:
2066 self.__is_matrix = True
2067 self.__C = numpy.matrix( __Matrix, float )
2068 self.shape = self.__C.shape
2069 self.size = self.__C.size
2070 elif __Object is not None:
2071 self.__is_object = True
2073 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2074 if not hasattr(self.__C,at):
2075 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2076 if hasattr(self.__C,"shape"):
2077 self.shape = self.__C.shape
2080 if hasattr(self.__C,"size"):
2081 self.size = self.__C.size
2089 def __validate(self):
2091 if self.__C is None:
2092 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2093 if self.ismatrix() and min(self.shape) != max(self.shape):
2094 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2095 if self.isobject() and min(self.shape) != max(self.shape):
2096 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2097 if self.isscalar() and self.__C <= 0:
2098 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2099 if self.isvector() and (self.__C <= 0).any():
2100 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
2101 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2103 numpy.linalg.cholesky( self.__C )
2105 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2106 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2110 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2113 "Vérification du type interne"
2114 return self.__is_scalar
2117 "Vérification du type interne"
2118 return self.__is_vector
2121 "Vérification du type interne"
2122 return self.__is_matrix
2125 "Vérification du type interne"
2126 return self.__is_object
2131 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2132 elif self.isvector():
2133 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2134 elif self.isscalar():
2135 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2136 elif self.isobject() and hasattr(self.__C,"getI"):
2137 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2139 return None # Indispensable
2144 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2145 elif self.isvector():
2146 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2147 elif self.isscalar():
2148 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2149 elif self.isobject() and hasattr(self.__C,"getT"):
2150 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2152 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2155 "Décomposition de Cholesky"
2157 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2158 elif self.isvector():
2159 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2160 elif self.isscalar():
2161 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2162 elif self.isobject() and hasattr(self.__C,"cholesky"):
2163 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2165 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2167 def choleskyI(self):
2168 "Inversion de la décomposition de Cholesky"
2170 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2171 elif self.isvector():
2172 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2173 elif self.isscalar():
2174 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2175 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2176 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2178 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2181 "Racine carrée matricielle"
2184 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2185 elif self.isvector():
2186 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2187 elif self.isscalar():
2188 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2189 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2190 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2192 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2195 "Inversion de la racine carrée matricielle"
2198 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2199 elif self.isvector():
2200 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2201 elif self.isscalar():
2202 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2203 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2204 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2206 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2208 def diag(self, msize=None):
2209 "Diagonale de la matrice"
2211 return numpy.diag(self.__C)
2212 elif self.isvector():
2214 elif self.isscalar():
2216 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2218 return self.__C * numpy.ones(int(msize))
2219 elif self.isobject() and hasattr(self.__C,"diag"):
2220 return self.__C.diag()
2222 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2224 def trace(self, msize=None):
2225 "Trace de la matrice"
2227 return numpy.trace(self.__C)
2228 elif self.isvector():
2229 return float(numpy.sum(self.__C))
2230 elif self.isscalar():
2232 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2234 return self.__C * int(msize)
2235 elif self.isobject():
2236 return self.__C.trace()
2238 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2240 def asfullmatrix(self, msize=None):
2243 return numpy.asarray(self.__C, dtype=float)
2244 elif self.isvector():
2245 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2246 elif self.isscalar():
2248 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2250 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2251 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2252 return self.__C.asfullmatrix()
2254 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2256 def assparsematrix(self):
2264 "x.__repr__() <==> repr(x)"
2265 return repr(self.__C)
2268 "x.__str__() <==> str(x)"
2269 return str(self.__C)
2271 def __add__(self, other):
2272 "x.__add__(y) <==> x+y"
2273 if self.ismatrix() or self.isobject():
2274 return self.__C + numpy.asmatrix(other)
2275 elif self.isvector() or self.isscalar():
2276 _A = numpy.asarray(other)
2277 if len(_A.shape) == 1:
2278 _A.reshape((-1,1))[::2] += self.__C
2280 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2281 return numpy.asmatrix(_A)
2283 def __radd__(self, other):
2284 "x.__radd__(y) <==> y+x"
2285 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2287 def __sub__(self, other):
2288 "x.__sub__(y) <==> x-y"
2289 if self.ismatrix() or self.isobject():
2290 return self.__C - numpy.asmatrix(other)
2291 elif self.isvector() or self.isscalar():
2292 _A = numpy.asarray(other)
2293 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2294 return numpy.asmatrix(_A)
2296 def __rsub__(self, other):
2297 "x.__rsub__(y) <==> y-x"
2298 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2301 "x.__neg__() <==> -x"
2304 def __matmul__(self, other):
2305 "x.__mul__(y) <==> x@y"
2306 if self.ismatrix() and isinstance(other, (int, float)):
2307 return numpy.asarray(self.__C) * other
2308 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2309 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2310 return numpy.ravel(self.__C @ numpy.ravel(other))
2311 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2312 return numpy.asarray(self.__C) @ numpy.asarray(other)
2314 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2315 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2316 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2317 return numpy.ravel(self.__C) * numpy.ravel(other)
2318 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2319 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2321 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2322 elif self.isscalar() and isinstance(other,numpy.matrix):
2323 return numpy.asarray(self.__C * other)
2324 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2325 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2326 return self.__C * numpy.ravel(other)
2328 return self.__C * numpy.asarray(other)
2329 elif self.isobject():
2330 return self.__C.__matmul__(other)
2332 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2334 def __mul__(self, other):
2335 "x.__mul__(y) <==> x*y"
2336 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2337 return self.__C * other
2338 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2339 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2340 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2341 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2342 return self.__C * numpy.asmatrix(other)
2345 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2346 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2347 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2348 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2349 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2350 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2353 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2354 elif self.isscalar() and isinstance(other,numpy.matrix):
2355 return self.__C * other
2356 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2357 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2358 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2360 return self.__C * numpy.asmatrix(other)
2361 elif self.isobject():
2362 return self.__C.__mul__(other)
2364 raise NotImplementedError(
2365 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2367 def __rmatmul__(self, other):
2368 "x.__rmul__(y) <==> y@x"
2369 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2370 return other * self.__C
2371 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2372 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2373 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2374 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2375 return numpy.asmatrix(other) * self.__C
2378 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2379 elif self.isvector() and isinstance(other,numpy.matrix):
2380 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2381 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2382 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2383 return numpy.asmatrix(numpy.array(other) * self.__C)
2386 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2387 elif self.isscalar() and isinstance(other,numpy.matrix):
2388 return other * self.__C
2389 elif self.isobject():
2390 return self.__C.__rmatmul__(other)
2392 raise NotImplementedError(
2393 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2395 def __rmul__(self, other):
2396 "x.__rmul__(y) <==> y*x"
2397 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2398 return other * self.__C
2399 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2400 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2401 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2402 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2403 return numpy.asmatrix(other) * self.__C
2406 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2407 elif self.isvector() and isinstance(other,numpy.matrix):
2408 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2409 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2410 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2411 return numpy.asmatrix(numpy.array(other) * self.__C)
2414 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2415 elif self.isscalar() and isinstance(other,numpy.matrix):
2416 return other * self.__C
2417 elif self.isscalar() and isinstance(other,float):
2418 return other * self.__C
2419 elif self.isobject():
2420 return self.__C.__rmul__(other)
2422 raise NotImplementedError(
2423 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2426 "x.__len__() <==> len(x)"
2427 return self.shape[0]
2429 # ==============================================================================
2430 class Observer2Func(object):
2432 Création d'une fonction d'observateur a partir de son texte
2434 __slots__ = ("__corps")
2436 def __init__(self, corps=""):
2437 self.__corps = corps
2438 def func(self,var,info):
2439 "Fonction d'observation"
2442 "Restitution du pointeur de fonction dans l'objet"
2445 # ==============================================================================
class CaseLogger(object):
    """
    Keep a record of the commands used to create a case, and restore
    (dump) or reload (load) them through registered viewer/loader classes.
    """
    __slots__ = (
        "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
        "__loaders",
        )

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        """
        __name, __objname : identifiers used by the formatters on dump.
        __addViewers, __addLoaders : optional {format_name: class} mappings
        merged into the built-in ones, allowing user-provided formats.
        """
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []
        self.__switchoff = False
        # Built-in output formats, usable with dump()
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
            }
        # Built-in input formats, usable with load()
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Register a single command"
        # A command is recorded only when fully described and when logging
        # has not been suspended by a previous record with __switchoff=True.
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
        # __switchoff=True suspends logging for the following registrations;
        # any later record with __switchoff=False re-enables it.
        if __switchoff:
            self.__switchoff = True
        else:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2503 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction      = lambda x: x,
        _mpEnabled      = False,
        _mpWorkers      = None,
        ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the function given as argument.

    _extraArguments : extra positional (list/tuple/map) or keyword (dict)
                      arguments forwarded to each call of _sFunction
    _mpEnabled      : if True, try to evaluate the points in parallel with
                      multiprocessing
    _mpWorkers      : pool size; None or <1 lets multiprocessing choose
    """
    # Initial checks and definitions
    # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            # Let multiprocessing pick the number of workers
            __mpWorkers = None
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            # Fall back silently to the sequential evaluation below
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = __xserie
        # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
        # logging.debug("MULTF Internal multiprocessing calculation end")
    else:
        # logging.debug("MULTF Internal monoprocessing calculation begin")
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
        # logging.debug("MULTF Internal monoprocessing calculation end")
    #
    # logging.debug("MULTF Internal multifonction calculations end")
    return __multiHX
2561 # ==============================================================================
# Self-test entry point: runs only when this module is executed directly,
# not when it is imported as part of the daCore package.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')