1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence
38 from daCore import PlatformInfo
39 from daCore import Interfaces
40 from daCore import Templates
42 # ==============================================================================
43 class CacheManager(object):
# Manage a bounded cache of (x, H(x)) operator evaluations so that
# redundant computations of the same operator at (numerically) the same
# point are not repeated.
# NOTE(review): this view of the file is elided (embedded original line
# numbers are non-contiguous); the "def __init__" line, some branch
# bodies, loop exits and return statements are not visible here.  The
# comments below describe only what the visible lines establish.
45 Classe générale de gestion d'un cache de calculs
48 "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
# Visible constructor defaults: toleranceInRedundancy is the relative
# tolerance used to declare two input vectors redundant;
# lengthOfRedundancy is the maximum number of cached pairs (-1 means
# "auto-size on first store", see storeValueInX below).
53 toleranceInRedundancy = 1.e-18,
54 lengthOfRedundancy = -1,
57 Les caractéristiques de tolérance peuvent être modifiées à la création.
59 self.__tolerBP = float(toleranceInRedundancy)
60 self.__lengthOR = int(lengthOfRedundancy)
61 self.__initlnOR = self.__lengthOR
# Look xValue up in the cache for operator oName, scanning from the most
# recent entry backwards.  An entry matches when the operator name and
# the vector size agree and the euclidean distance to the stored point is
# below the relative tolerance; the stored result is then picked up in
# __HxV.  The surrounding control flow (returns, loop exits) is elided in
# this view.
71 def wasCalculatedIn(self, xValue, oName="" ):
72 "Checks for the existence of a cached computation matching the value"
76 for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
77 if not hasattr(xValue, 'size'):
79 elif (str(oName) != self.__listOPCV[i][3]):
81 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component screen before paying for the full norm below.
83 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
85 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
87 __HxV = self.__listOPCV[i][1]
# Store the pair (xValue, HxValue) for operator oName.  The cache length
# is auto-sized on first use, grown once per newly seen operator name,
# and the oldest entries are evicted once the bound is exceeded.
91 def storeValueInX(self, xValue, HxValue, oName="" ):
92 "Stores, for an operator o, a computation Hx matching the value x"
93 if self.__lengthOR < 0:
# Auto-size: roughly two entries per state component, capped at 102.
94 self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
95 self.__initlnOR = self.__lengthOR
96 self.__seenNames.append(str(oName))
97 if str(oName) not in self.__seenNames: # Extend the list if the name is new
98 self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
99 self.__initlnOR += self.__lengthOR
100 self.__seenNames.append(str(oName))
# FIFO eviction of the oldest cached pairs down to the allowed length.
101 while len(self.__listOPCV) > self.__lengthOR:
102 self.__listOPCV.pop(0)
103 self.__listOPCV.append( (
104 copy.copy(numpy.ravel(xValue)), # 0 Previous point
105 copy.copy(HxValue), # 1 Previous value
106 numpy.linalg.norm(xValue), # 2 Norm
107 str(oName), # 3 Operator name
# Disable/enable fragments: disabling remembers the nominal length and
# switches __enabled off; enabling restores the length and switches it
# back on.  NOTE(review): the enclosing "def" lines of these two methods
# are elided in this view.
112 self.__initlnOR = self.__lengthOR
114 self.__enabled = False
118 self.__lengthOR = self.__initlnOR
119 self.__enabled = True
121 # ==============================================================================
122 class Operator(object):
# Generic single-operator interface: wraps either an explicit matrix
# (fromMatrix) or a user-supplied function (fromMethod, mono- or
# multi-valued), counts its evaluations, and consults the class-level
# cache (Operator.CM) to avoid redundant computations.
# NOTE(review): this view of the file is elided (embedded original line
# numbers are non-contiguous); the "def __init__" line, several
# "else:"/"return"/"raise" lines and the class attributes (CM,
# NbCallsAsMatrix, ...) referenced below are not visible here.
124 Classe générale d'interface de type opérateur simple
127 "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
128 "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
129 "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
# Visible constructor defaults (the "def __init__" line itself is
# elided); fromMethod/fromMatrix select the operator representation:
138 name = "GenericOperator",
141 avoidingRedundancy = True,
142 reducingMemoryUse = False,
143 inputAsMultiFunction = False,
144 enableMultiProcess = False,
145 extraArguments = None,
148 On construit un objet de ce type en fournissant, à l'aide de l'un des
149 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
152 - name : nom d'opérateur
153 - fromMethod : argument de type fonction Python
154 - fromMatrix : argument adapté au constructeur numpy.array/matrix
155 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
156 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
158 - inputAsMultiFunction : booléen indiquant une fonction explicitement
159 définie (ou pas) en multi-fonction
160 - extraArguments : arguments supplémentaires passés à la fonction de
161 base et ses dérivées (tuple ou dictionnaire)
163 self.__name = str(name)
# Per-instance call counters (matrix / method / cache hits).
164 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
165 self.__reduceM = bool( reducingMemoryUse )
166 self.__avoidRC = bool( avoidingRedundancy )
167 self.__inputAsMF = bool( inputAsMultiFunction )
168 self.__mpEnabled = bool( enableMultiProcess )
169 self.__extraArgs = extraArguments
# Case 1: user function already a multi-function -> use it directly.
170 if fromMethod is not None and self.__inputAsMF:
171 self.__Method = fromMethod # logtimer(fromMethod)
173 self.__Type = "Method"
# Case 2: mono-function -> wrap it into a multi-function adapter
# (MultiFonction is defined elsewhere in this module, not visible here).
174 elif fromMethod is not None and not self.__inputAsMF:
175 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
177 self.__Type = "Method"
# Case 3: explicit (possibly string-encoded) matrix operator.
178 elif fromMatrix is not None:
180 if isinstance(fromMatrix, str):
181 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
182 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
183 self.__Type = "Matrix"
# Globally disable the shared redundancy cache for all Operator objects.
189 def disableAvoidingRedundancy(self):
191 Operator.CM.disable()
# Globally re-enable the shared redundancy cache.  NOTE(review): only a
# CM.disable() call of this method's (presumably conditional) body is
# visible in this elided view; the CM.enable() path is not shown.
193 def enableAvoidingRedundancy(self):
198 Operator.CM.disable()
# Apply the operator to a series of arguments xValue, optionally reusing
# precomputed values HValue, with cache lookups per element; uncached
# elements are gathered into a series and evaluated in one MF call.
204 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
206 Permet de restituer le résultat de l'application de l'opérateur à une
207 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
208 argument devant a priori être du bon type.
210 - les arguments par série sont :
211 - xValue : argument adapté pour appliquer l'opérateur
212 - HValue : valeur précalculée de l'opérateur en ce point
213 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch A: results were precomputed by the caller -> store them in the
# cache instead of evaluating the operator.
220 if HValue is not None:
224 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
226 if _HValue is not None:
227 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
229 for i in range(len(_HValue)):
230 _HxValue.append( _HValue[i] )
232 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Branch B: evaluate, checking the cache element by element.
237 for i, xv in enumerate(_xValue):
239 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
241 __alreadyCalculated = False
243 if __alreadyCalculated:
244 self.__addOneCacheCall()
247 if self.__Matrix is not None:
248 self.__addOneMatrixCall()
249 _hv = self.__Matrix @ numpy.ravel(xv)
251 self.__addOneMethodCall()
255 _HxValue.append( _hv )
# Evaluate all still-uncached points in a single multi-function call
# (_xserie is built in elided lines above).
257 if len(_xserie)>0 and self.__Matrix is None:
258 if self.__extraArgs is None:
259 _hserie = self.__Method( _xserie ) # MF computation
261 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
262 if not hasattr(_hserie, "pop"):
264 "The user input multi-function doesn't seem to return a"+\
265 " result sequence, behaving like a mono-function. It has"+\
273 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series of results column-wise into one 2D array.
275 if returnSerieAsArrayMatrix:
276 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
278 if argsAsSerie: return _HxValue
279 else: return _HxValue[-1]
# Apply the operator to (xValue, uValue) pairs; if uValue is None the
# operator is applied to xValue alone.  Matrix form ignores uValue.
281 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
283 Permet de restituer le résultat de l'application de l'opérateur à des
284 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
285 argument devant a priori être du bon type. Si la uValue est None,
286 on suppose que l'opérateur ne s'applique qu'à xValue.
288 - paires : les arguments par paire sont :
289 - xValue : argument X adapté pour appliquer l'opérateur
290 - uValue : argument U adapté pour appliquer l'opérateur
291 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
293 if argsAsSerie: _xuValue = paires
294 else: _xuValue = (paires,)
295 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: apply the matrix to each X (the control U is unused here).
297 if self.__Matrix is not None:
299 for paire in _xuValue:
300 _xValue, _uValue = paire
301 self.__addOneMatrixCall()
302 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: forward the (X,U) pair, or X alone when U is None.
305 for paire in _xuValue:
306 _xValue, _uValue = paire
307 if _uValue is not None:
308 _xuArgs.append( paire )
310 _xuArgs.append( _xValue )
311 self.__addOneMethodCall( len(_xuArgs) )
312 if self.__extraArgs is None:
313 _HxValue = self.__Method( _xuArgs ) # MF computation
315 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
317 if returnSerieAsArrayMatrix:
318 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
320 if argsAsSerie: return _HxValue
321 else: return _HxValue[-1]
# Apply the operator, valid at xNominal, to xValue pairs; a linear
# (matrix) operator is valid at any nominal point.
323 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
325 Permet de restituer le résultat de l'application de l'opérateur à une
326 série d'arguments xValue, sachant que l'opérateur est valable en
327 xNominal. Cette méthode se contente d'appliquer, son argument devant a
328 priori être du bon type. Si l'opérateur est linéaire car c'est une
329 matrice, alors il est valable en tout point nominal et xNominal peut
330 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
331 permet d'indiquer que l'argument est multi-paires.
333 - paires : les arguments par paire sont :
334 - xNominal : série d'arguments permettant de donner le point où
335 l'opérateur est construit pour être ensuite appliqué
336 - xValue : série d'arguments adaptés pour appliquer l'opérateur
337 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
339 if argsAsSerie: _nxValue = paires
340 else: _nxValue = (paires,)
341 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Matrix form: xNominal is irrelevant, apply the matrix to each xValue.
343 if self.__Matrix is not None:
345 for paire in _nxValue:
346 _xNominal, _xValue = paire
347 self.__addOneMatrixCall()
348 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: forward the (xNominal, xValue) pairs in one MF call.
350 self.__addOneMethodCall( len(_nxValue) )
351 if self.__extraArgs is None:
352 _HxValue = self.__Method( _nxValue ) # MF computation
354 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
356 if returnSerieAsArrayMatrix:
357 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
359 if argsAsSerie: return _HxValue
360 else: return _HxValue[-1]
# Return the operator as a matrix; a function-form operator needs an
# operating point (ValueForMethodForm), the sentinel string meaning
# "no point given".
362 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
364 Permet de renvoyer l'opérateur sous la forme d'une matrice
366 if self.__Matrix is not None:
367 self.__addOneMatrixCall()
368 mValue = [self.__Matrix,]
369 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
372 self.__addOneMethodCall( len(ValueForMethodForm) )
373 for _vfmf in ValueForMethodForm:
374 mValue.append( self.__Method(((_vfmf, None),)) )
376 self.__addOneMethodCall()
377 mValue = self.__Method(((ValueForMethodForm, None),))
379 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
381 if argsAsSerie: return mValue
382 else: return mValue[-1]
# Shape accessor fragment (its "def" line is elided in this view): only
# available when the operator is stored as a matrix.
386 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
387 la forme d'une matrice
389 if self.__Matrix is not None:
390 return self.__Matrix.shape
392 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Return the evaluation counters: per-instance (total, matrix, method,
# cached) followed by the class-wide equivalents; "which" selects one.
394 def nbcalls(self, which=None):
396 Renvoie les nombres d'évaluations de l'opérateur
399 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
400 self.__NbCallsAsMatrix,
401 self.__NbCallsAsMethod,
402 self.__NbCallsOfCached,
403 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
404 Operator.NbCallsAsMatrix,
405 Operator.NbCallsAsMethod,
406 Operator.NbCallsOfCached,
408 if which is None: return __nbcalls
409 else: return __nbcalls[which]
# Record one matrix-form evaluation (instance and class counters).
411 def __addOneMatrixCall(self):
412 "Counts one call"
413 self.__NbCallsAsMatrix += 1 # Local count
414 Operator.NbCallsAsMatrix += 1 # Global count
# Record nb method-form evaluations (instance and class counters).
416 def __addOneMethodCall(self, nb = 1):
417 "Counts one call"
418 self.__NbCallsAsMethod += nb # Local count
419 Operator.NbCallsAsMethod += nb # Global count
# Record one cache hit (instance and class counters).
421 def __addOneCacheCall(self):
422 "Counts one call"
423 self.__NbCallsOfCached += 1 # Local count
424 Operator.NbCallsOfCached += 1 # Global count
426 # ==============================================================================
427 class FullOperator(object):
# Full operator interface bundling the Direct, Tangent and Adjoint forms
# into self.__FO ("Direct"/"Tangent"/"Adjoint" Operator instances, plus
# "AppliedInX" precomputed values).  The operator may be supplied as one
# function (derivatives approximated by finite differences), as three
# functions, as a script, or as a matrix.
# NOTE(review): this view of the file is elided (embedded original line
# numbers are non-contiguous); the "def __init__" line, several "else:"
# branches, some initialisations (__Parameters, __appliedInX, self.__FO)
# and the "def __repr__"/"def __str__" lines are not visible here.
429 Classe générale d'interface de type opérateur complet
430 (Direct, Linéaire Tangent, Adjoint)
433 "__name", "__check", "__extraArgs", "__FO", "__T",
# Visible constructor defaults (the "def __init__" line is elided):
437 name = "GenericFullOperator",
439 asOneFunction = None, # 1 function
440 asThreeFunctions = None, # 3 functions in a dictionary
441 asScript = None, # 1 or 3 function(s) by script
442 asDict = None, # Parameters
444 extraArguments = None,
445 performancePrf = None,
446 inputAsMF = False,# Function(s) as multi-functions
451 self.__name = str(name)
452 self.__check = bool(toBeChecked)
453 self.__extraArgs = extraArguments
# Merge the user parameter dictionary, then normalise the
# multiprocessing switches (derivatives and evaluation are mutually
# exclusive; EnableMultiProcessingInDerivatives=True wins).
458 if (asDict is not None) and isinstance(asDict, dict):
459 __Parameters.update( asDict )
460 # Priority to EnableMultiProcessingInDerivatives=True
461 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
462 __Parameters["EnableMultiProcessingInDerivatives"] = True
463 __Parameters["EnableMultiProcessingInEvaluation"] = False
464 if "EnableMultiProcessingInDerivatives" not in __Parameters:
465 __Parameters["EnableMultiProcessingInDerivatives"] = False
466 if __Parameters["EnableMultiProcessingInDerivatives"]:
467 __Parameters["EnableMultiProcessingInEvaluation"] = False
468 if "EnableMultiProcessingInEvaluation" not in __Parameters:
469 __Parameters["EnableMultiProcessingInEvaluation"] = False
470 if "withIncrement" in __Parameters: # Temporary
471 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
472 # The default is equivalent to "ReducedOverallRequirements"
473 __reduceM, __avoidRC = True, True
# Map the performance profile name onto the two savings switches
# (memory reduction, redundancy avoidance).
474 if performancePrf is not None:
475 if performancePrf == "ReducedAmountOfCalculation":
476 __reduceM, __avoidRC = False, True
477 elif performancePrf == "ReducedMemoryFootprint":
478 __reduceM, __avoidRC = True, False
479 elif performancePrf == "NoSavings":
480 __reduceM, __avoidRC = False, False
# Input form 1: a script providing either a matrix under the operator
# name, or 1/3 functions under conventional names.
482 if asScript is not None:
483 __Matrix, __Function = None, None
485 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
487 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
488 __Function.update({"useApproximatedDerivatives":True})
489 __Function.update(__Parameters)
490 elif asThreeFunctions:
492 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
493 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
494 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
496 __Function.update(__Parameters)
# Input form 2: direct arguments (one function / three functions).
499 if asOneFunction is not None:
500 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
501 if asOneFunction["Direct"] is not None:
502 __Function = asOneFunction
504 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
506 __Function = { "Direct":asOneFunction }
507 __Function.update({"useApproximatedDerivatives":True})
508 __Function.update(__Parameters)
509 elif asThreeFunctions is not None:
# Full Tangent+Adjoint pair given and approximation not requested.
510 if isinstance(asThreeFunctions, dict) and \
511 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
512 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
513 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
514 __Function = asThreeFunctions
# Only Direct given: fall back to approximated derivatives.
515 elif isinstance(asThreeFunctions, dict) and \
516 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
517 __Function = asThreeFunctions
518 __Function.update({"useApproximatedDerivatives":True})
521 "The functions has to be given in a dictionnary which have either"+\
522 " 1 key (\"Direct\") or"+\
523 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# When Direct is absent, reuse the Tangent as the Direct operator.
524 if "Direct" not in asThreeFunctions:
525 __Function["Direct"] = asThreeFunctions["Tangent"]
526 __Function.update(__Parameters)
# Normalise the precomputed-in-X values into a dictionary keyed "HXb".
530 if appliedInX is not None and isinstance(appliedInX, dict):
531 __appliedInX = appliedInX
532 elif appliedInX is not None:
533 __appliedInX = {"HXb":appliedInX}
537 if scheduledBy is not None:
538 self.__T = scheduledBy
# Build path A: Direct function with finite-difference approximated
# Tangent/Adjoint (FDApproximation), after filling parameter defaults.
540 if isinstance(__Function, dict) and \
541 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
542 ("Direct" in __Function) and (__Function["Direct"] is not None):
543 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
544 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
545 if "withdX" not in __Function: __Function["withdX"] = None
546 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
547 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
548 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
549 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
550 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
551 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import to avoid a circular dependency at module load time
# (NumericObjects also depends on this module's classes).
552 from daCore import NumericObjects
553 FDA = NumericObjects.FDApproximation(
555 Function = __Function["Direct"],
556 centeredDF = __Function["CenteredFiniteDifference"],
557 increment = __Function["DifferentialIncrement"],
558 dX = __Function["withdX"],
559 extraArguments = self.__extraArgs,
560 reducingMemoryUse = __Function["withReducingMemoryUse"],
561 avoidingRedundancy = __Function["withAvoidingRedundancy"],
562 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
563 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
564 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
565 mpWorkers = __Function["NumberOfProcesses"],
566 mfEnabled = __Function["withmfEnabled"],
568 self.__FO["Direct"] = Operator(
570 fromMethod = FDA.DirectOperator,
571 reducingMemoryUse = __reduceM,
572 avoidingRedundancy = __avoidRC,
573 inputAsMultiFunction = inputAsMF,
574 extraArguments = self.__extraArgs,
575 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
576 self.__FO["Tangent"] = Operator(
577 name = self.__name+"Tangent",
578 fromMethod = FDA.TangentOperator,
579 reducingMemoryUse = __reduceM,
580 avoidingRedundancy = __avoidRC,
581 inputAsMultiFunction = inputAsMF,
582 extraArguments = self.__extraArgs )
583 self.__FO["Adjoint"] = Operator(
584 name = self.__name+"Adjoint",
585 fromMethod = FDA.AdjointOperator,
586 reducingMemoryUse = __reduceM,
587 avoidingRedundancy = __avoidRC,
588 inputAsMultiFunction = inputAsMF,
589 extraArguments = self.__extraArgs )
# Build path B: all three functions supplied explicitly.
590 elif isinstance(__Function, dict) and \
591 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
592 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
593 self.__FO["Direct"] = Operator(
595 fromMethod = __Function["Direct"],
596 reducingMemoryUse = __reduceM,
597 avoidingRedundancy = __avoidRC,
598 inputAsMultiFunction = inputAsMF,
599 extraArguments = self.__extraArgs,
600 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
601 self.__FO["Tangent"] = Operator(
602 name = self.__name+"Tangent",
603 fromMethod = __Function["Tangent"],
604 reducingMemoryUse = __reduceM,
605 avoidingRedundancy = __avoidRC,
606 inputAsMultiFunction = inputAsMF,
607 extraArguments = self.__extraArgs )
608 self.__FO["Adjoint"] = Operator(
609 name = self.__name+"Adjoint",
610 fromMethod = __Function["Adjoint"],
611 reducingMemoryUse = __reduceM,
612 avoidingRedundancy = __avoidRC,
613 inputAsMultiFunction = inputAsMF,
614 extraArguments = self.__extraArgs )
# Build path C: matrix operator; Tangent is the matrix itself, Adjoint
# its transpose.
615 elif asMatrix is not None:
616 if isinstance(__Matrix, str):
617 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
618 __matrice = numpy.asarray( __Matrix, dtype=float )
619 self.__FO["Direct"] = Operator(
621 fromMatrix = __matrice,
622 reducingMemoryUse = __reduceM,
623 avoidingRedundancy = __avoidRC,
624 inputAsMultiFunction = inputAsMF,
625 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
626 self.__FO["Tangent"] = Operator(
627 name = self.__name+"Tangent",
628 fromMatrix = __matrice,
629 reducingMemoryUse = __reduceM,
630 avoidingRedundancy = __avoidRC,
631 inputAsMultiFunction = inputAsMF )
632 self.__FO["Adjoint"] = Operator(
633 name = self.__name+"Adjoint",
634 fromMatrix = __matrice.T,
635 reducingMemoryUse = __reduceM,
636 avoidingRedundancy = __avoidRC,
637 inputAsMultiFunction = inputAsMF )
# No usable input form: the (elided) raise reports the requirement.
641 "The %s object is improperly defined or undefined,"%self.__name+\
642 " it requires at minima either a matrix, a Direct operator for"+\
643 " approximate derivatives or a Tangent/Adjoint operators pair."+\
644 " Please check your operator input.")
# Store the precomputed values as column vectors (strings are parsed
# into numeric lists first).
646 if __appliedInX is not None:
647 self.__FO["AppliedInX"] = {}
648 for key in __appliedInX:
649 if isinstance(__appliedInX[key], str):
650 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
651 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
653 self.__FO["AppliedInX"] = None
# __repr__/__str__ bodies (their "def" lines are elided in this view):
# delegate to the internal operator dictionary.
659 "x.__repr__() <==> repr(x)"
660 return repr(self.__FO)
663 "x.__str__() <==> str(x)"
664 return str(self.__FO)
666 # ==============================================================================
667 class Algorithm(object):
669 Classe générale d'interface de type algorithme
671 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
672 d'assimilation, en fournissant un container (dictionnaire) de variables
673 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
675 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
678 "_name", "_parameters", "__internal_state", "__required_parameters",
679 "_m", "__variable_names_not_public", "__canonical_parameter_name",
680 "__canonical_stored_name", "__replace_by_the_new_name",
684 def __init__(self, name):
# Build the generic storage for an elementary algorithm: one Persistence
# container per storable variable (self.StoredVariables), plus the
# canonical-name lookup tables used for case-insensitive access.
# NOTE(review): this view of the file is elided (embedded original line
# numbers are non-contiguous); the docstring quotes and a few statements
# are not visible here.
686 L'initialisation présente permet de fabriquer des variables de stockage
687 disponibles de manière générique dans les algorithmes élémentaires. Ces
688 variables de stockage sont ensuite conservées dans un dictionnaire
689 interne à l'objet, mais auquel on accède par la méthode "get".
691 Les variables prévues sont :
692 - APosterioriCorrelations : matrice de corrélations de la matrice A
693 - APosterioriCovariance : matrice de covariances a posteriori : A
694 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
695 - APosterioriVariances : vecteur des variances de la matrice A
696 - Analysis : vecteur d'analyse : Xa
697 - BMA : Background moins Analysis : Xa - Xb
698 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
699 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
700 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
701 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
702 - CostFunctionJo : partie observations de la fonction-coût : Jo
703 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
704 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
705 - CurrentOptimum : état optimal courant lors d'itérations
706 - CurrentState : état courant lors d'itérations
707 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
708 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
709 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
710 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
711 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
712 - ForecastState : état prédit courant lors d'itérations
713 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
714 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
715 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
716 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
717 - Innovation : l'innovation : d = Y - H(X)
718 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
719 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
720 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
721 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
722 - InternalCostFunctionJo : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
723 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
724 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
725 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
726 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
727 - KalmanGainAtOptimum : gain de Kalman à l'optimum
728 - MahalanobisConsistency : indicateur de consistance des covariances
729 - OMA : Observation moins Analyse : Y - Xa
730 - OMB : Observation moins Background : Y - Xb
731 - ReducedCoordinates : coordonnées dans la base réduite
732 - Residu : dans le cas des algorithmes de vérification
733 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
734 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
735 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
736 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
737 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
738 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
739 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
740 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
741 - SingularValues : valeurs singulières provenant d'une décomposition SVD
742 On peut rajouter des variables à stocker dans l'initialisation de
743 l'algorithme élémentaire qui va hériter de cette classe
745 logging.debug("%s Initialisation", str(name))
# Memory probe used by _pre_run to log the process footprint.
746 self._m = PlatformInfo.SystemUsage()
748 self._name = str( name )
749 self._parameters = {"StoreSupplementaryCalculations":[]}
750 self.__internal_state = {}
751 self.__required_parameters = {}
752 self.__required_inputs = {
753 "RequiredInputValues":{"mandatory":(), "optional":()},
754 "ClassificationTags":[],
756 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
757 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
758 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
759 self.__replace_by_the_new_name = {} # New name from an old name
# One Persistence container per storable variable; keys match the
# documented names above.
761 self.StoredVariables = {}
762 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
763 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
764 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
765 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
766 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
767 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
768 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
769 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
770 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
771 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
772 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
773 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
774 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
775 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
776 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
777 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
778 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
779 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
780 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
781 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
782 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
783 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
784 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
785 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
786 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
787 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
788 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
789 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
790 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
791 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
792 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
793 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
794 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
795 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
796 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
797 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
798 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
799 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
800 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
801 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
802 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
803 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
804 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
805 self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
806 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
807 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
808 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
809 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
810 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
811 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
812 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
813 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
814 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
815 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
816 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
817 self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
# Build the case-insensitive lookup tables for stored-variable and
# parameter names.
819 for k in self.StoredVariables:
820 self.__canonical_stored_name[k.lower()] = k
822 for k, v in self.__variable_names_not_public.items():
823 self.__canonical_parameter_name[k.lower()] = k
824 self.__canonical_parameter_name["algorithm"] = "Algorithm"
825 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        "Pre-computation: checks and completes the algorithm inputs before a run"
        # NOTE(review): several structural lines of this method (an
        # "if argument is None:" guard, "else:" branches, "logging.debug("
        # openers and trailing "return" lines) are visibly missing from this
        # copy of the file; the remaining statements are kept byte-identical.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Update of the internal parameters with the content of Parameters,
        # taking the default values for all those not defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        # Corrections and completions of the vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Check that a mandatory/optional input vector is set, log its status/size
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                        "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
                        self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        # Corrections and completions of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Check that a mandatory/optional error covariance matrix is set, log its status
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Corrections and completions of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Check that a mandatory/optional operator is set, log its status
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        # Corrections and completions of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) \
                and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
                and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Caution: unlike Bounds, no default to None, otherwise one cannot be without bounds
        # Corrections and completions of the initialization in X
        if "InitializationPoint" in self._parameters:
                if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                    if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                        raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                            %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                    # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
                    self._parameters["InitializationPoint"] = numpy.ravel(Xb)
                if self._parameters["InitializationPoint"] is None:
                    raise ValueError("Forced initial point can not be set without any given Background or required value")
        # Correction to work around a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    def _post_run(self,_oH=None):
        "Post-computation: derive a posteriori quantities and log run statistics"
        # NOTE(review): the original docstring line, two "logging.debug("
        # call openers and the final "return" are visibly missing from this
        # copy of the file; remaining statements are kept byte-identical.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Correlation = D^-1/2 A D^-1/2 with D the diagonal of A
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
                "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
                "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
971 def _toStore(self, key):
972 "True if in StoreSupplementaryCalculations, else False"
973 return key in self._parameters["StoreSupplementaryCalculations"]
975 def get(self, key=None):
977 Renvoie l'une des variables stockées identifiée par la clé, ou le
978 dictionnaire de l'ensemble des variables disponibles en l'absence de
979 clé. Ce sont directement les variables sous forme objet qui sont
980 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
981 des classes de persistance.
984 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
986 return self.StoredVariables
988 def __contains__(self, key=None):
989 "D.__contains__(k) -> True if D has a key k, else False"
990 if key is None or key.lower() not in self.__canonical_stored_name:
993 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
996 "D.keys() -> list of D's keys"
997 if hasattr(self, "StoredVariables"):
998 return self.StoredVariables.keys()
    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
            # NOTE(review): the "else:" fallback opener, a "try:" line and
            # the trailing statements of this method (the d-default /
            # KeyError handling) are visibly missing from this copy; the
            # remaining statements are kept byte-identical.
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
1017 def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
1019 Doit implémenter l'opération élémentaire de calcul algorithmique.
1021 raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
    def defineRequiredParameter(self,
        # NOTE(review): the keyword-argument lines of this signature
        # (name/default/typecast/message/minval/maxval/listval/listadv/
        # oldname defaults) are visibly missing from this copy, as are the
        # "minval"/"maxval" dictionary entries and the closing "}" below;
        # the remaining statements are kept byte-identical.
        """
        Allows to define required parameters and their default
        characteristics in the algorithm.
        """
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "default" : default,
            "typecast" : typecast,
            "listval" : listval,
            "listadv" : listadv,
            "message" : message,
            "oldname" : oldname,
        self.__canonical_parameter_name[name.lower()] = name
        if oldname is not None:
            # Deprecated name maps to the new canonical one
            self.__canonical_parameter_name[oldname.lower()] = name # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
1057 def getRequiredParameters(self, noDetails=True):
1059 Renvoie la liste des noms de paramètres requis ou directement le
1060 dictionnaire des paramètres requis.
1063 return sorted(self.__required_parameters.keys())
1065 return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way
        (typecast, min/max bounds, allowed-value lists).
        """
        # NOTE(review): the "__val = None" default branch, several "else:" /
        # "try:" / "except:" opener lines, the "for v in __val:" loop opener
        # and the final SetSeed handling plus "return __val" are visibly
        # missing from this copy; remaining statements are byte-identical.
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    if listval is not None and v in listval: continue
                    elif listadv is not None and v in listadv: continue
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
        if __k in ["SetSeed",]:
1111 def requireInputArguments(self, mandatory=(), optional=()):
1113 Permet d'imposer des arguments de calcul requis en entrée.
1115 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1116 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
1118 def getInputArguments(self):
1120 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1122 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
1124 def setAttributes(self, tags=()):
1126 Permet d'adjoindre des attributs comme les tags de classification.
1127 Renvoie la liste actuelle dans tous les cas.
1129 self.__required_inputs["ClassificationTags"].extend( tags )
1130 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters into the internal dictionary.
        """
        # NOTE(review): the docstring delimiters and several "else:" /
        # separator lines of this method are visibly missing from this copy;
        # remaining statements are kept byte-identical. fromDico is only
        # read, never mutated, so the mutable default is harmless here.
        self._parameters.update( fromDico )
        # Map canonical parameter names back to the keys the caller used
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        # Warn about deprecated (renamed) parameters still in use
        for k in __inverse_fromDico_keys.values():
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
                __msg += " Please update your code."
                warnings.warn(__msg, FutureWarning, stacklevel=50)
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
                # Very long values are logged by length only
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1163 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1165 Permet de stocker des variables nommées constituant l'état interne
1167 if reset: # Vide le dictionnaire préalablement
1168 self.__internal_state = {}
1169 if key is not None and value is not None:
1170 self.__internal_state[key] = value
1171 self.__internal_state.update( dict(fromDico) )
1173 def _getInternalState(self, key=None):
1175 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1177 if key is not None and key in self.__internal_state:
1178 return self.__internal_state[key]
1180 return self.__internal_state
1182 def _getTimeState(self, reset=False):
1184 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1187 self.__initial_cpu_time = time.process_time()
1188 self.__initial_elapsed_time = time.perf_counter()
1191 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1192 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1193 return self.__cpu_time, self.__elapsed_time
1195 def _StopOnTimeLimit(self, X=None, withReason=False):
1196 "Stop criteria on time limit: True/False [+ Reason]"
1197 c, e = self._getTimeState()
1198 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1199 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1200 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1201 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1203 __SC, __SR = False, ""
1209 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class mimicking "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to those of the "Algorithm" class.
    (Restores the docstring/__slots__ delimiters and the if/else lines of
    "get" visibly dropped from this copy.)
    """
    __slots__ = (
        "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
    )

    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        #
        # Case-insensitive lookup map: lowercased key -> canonical key
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one stored variable identified by its (case-insensitive) key,
        or the dictionary of all the available variables when no key is
        given. The variables are returned directly as objects, so the
        individual access methods are the ones of the persistence classes.
        """
        if key is not None:
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        else:
            return self.StoredVariables
1253 # ==============================================================================
1254 class AlgorithmAndParameters(object):
1256 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1259 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1260 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1261 "__Q", "__variable_names_not_public",
        # NOTE(review): the "def __init__(self," opening line and several
        # keyword-argument lines of this constructor signature (as well as
        # the "self.__A"/"self.__P" initializations and an "else:" branch)
        # are visibly missing from this copy; code kept byte-identical.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        #
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        # Parameters may come from a dict or from a script
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the keyword-argument lines of this signature
        # (asDict/asScript defaults) and the "else: __Dict = asDict" branch
        # are visibly missing from this copy; code kept byte-identical.
        "Update of the parameters"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Allows to run the assimilation calculation"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each study object through getO() when available
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            # NOTE(review): the keyword lines passing Xb/Y/U/HO/EM/CM/R/B/Q
            # and the closing parenthesis of this call are visibly missing
            # from this copy; code kept byte-identical.
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Allows to run the assimilation calculation"
        # NOTE(review): several lines are visibly missing from this copy
        # (the "else:" around the file-name resolution, "import pilot" /
        # "import loader", the "try:"/"except:" openers, "if not p.isValid():"
        # and the "e.RunW(p)" execution call); code kept byte-identical.
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            # Register the algorithm catalog contained in the schema itself
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Consistency check of the schema links before execution
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Checks the existence of, and returns, a variable or parameter key"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # NOTE(review): the "else:" opener and the final "return" of the
            # all-variables branch are visibly missing from this copy.
            # NOTE(review): "allvariables" aliases self.__P without copying,
            # so the pop() below removes keys from self.__P itself — verify
            # this is intended.
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1413 def pop(self, k, d):
1414 "Necessaire pour le pickling"
1415 return self.__algorithm.pop(k, d)
1417 def getAlgorithmRequiredParameters(self, noDetails=True):
1418 "Renvoie la liste des paramètres requis selon l'algorithme"
1419 return self.__algorithm.getRequiredParameters(noDetails)
1421 def getAlgorithmInputArguments(self):
1422 "Renvoie la liste des entrées requises selon l'algorithme"
1423 return self.__algorithm.getInputArguments()
1425 def getAlgorithmAttributes(self):
1426 "Renvoie la liste des attributs selon l'algorithme"
1427 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attach an observer to the stored variable named __V; __I is passed
        # as HookParameters. Presumably __O is the hook function and __S its
        # scheduler — TODO confirm, since the corresponding keyword lines of
        # the setDataObserver call are visibly missing from this copy.
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            # NOTE(review): the "else:" opener, the other keyword arguments
            # and the closing parenthesis are visibly missing here.
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Remove an observer from the stored variable named __V; the keyword
        # arguments of the removeDataObserver call (presumably passing __O
        # and __A) are visibly missing from this copy — TODO confirm.
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            # NOTE(review): the "else:" opener, the call keyword arguments
            # and the closing parenthesis are visibly missing here.
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1456 def hasObserver(self, __V):
1457 if self.__algorithm is None \
1458 or isinstance(self.__algorithm, dict) \
1459 or not hasattr(self.__algorithm,"StoredVariables"):
1461 if __V not in self.__algorithm:
1463 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the "def keys(self):" opening line and its docstring
        # are visibly missing just above this point in this copy of the file.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        # Hide the internal, non-public variable names from the key list
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1471 def __contains__(self, key=None):
1472 "D.__contains__(k) -> True if D has a key k, else False"
1473 return key in self.__algorithm or key in self.__P
        # NOTE(review): the "def __repr__(self):" opening line is visibly
        # missing just above this point in this copy of the file.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # NOTE(review): the "def __str__(self):" opening line is visibly
        # missing just above this point in this copy of the file.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Allows to select the algorithm to use to carry out the assimilation
        study. The argument is a character string referring to the name of
        an algorithm performing the operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" guard, the "module_path"
        # initialization, two "raise ImportError(" call openers, the "try:"
        # opener and the trailing lines are visibly missing from this copy;
        # remaining statements are kept byte-identical.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
                "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
                "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary type
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1523 def __shape_validate(self):
1525 Validation de la correspondance correcte des tailles des variables et
1526 des matrices s'il y en a.
1528 if self.__Xb is None: __Xb_shape = (0,)
1529 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1530 elif hasattr(self.__Xb,"shape"):
1531 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1532 else: __Xb_shape = self.__Xb.shape()
1533 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1535 if self.__Y is None: __Y_shape = (0,)
1536 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1537 elif hasattr(self.__Y,"shape"):
1538 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1539 else: __Y_shape = self.__Y.shape()
1540 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1542 if self.__U is None: __U_shape = (0,)
1543 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1544 elif hasattr(self.__U,"shape"):
1545 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1546 else: __U_shape = self.__U.shape()
1547 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1549 if self.__B is None: __B_shape = (0,0)
1550 elif hasattr(self.__B,"shape"):
1551 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1552 else: __B_shape = self.__B.shape()
1553 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1555 if self.__R is None: __R_shape = (0,0)
1556 elif hasattr(self.__R,"shape"):
1557 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1558 else: __R_shape = self.__R.shape()
1559 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1561 if self.__Q is None: __Q_shape = (0,0)
1562 elif hasattr(self.__Q,"shape"):
1563 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1564 else: __Q_shape = self.__Q.shape()
1565 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1567 if len(self.__HO) == 0: __HO_shape = (0,0)
1568 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1569 elif hasattr(self.__HO["Direct"],"shape"):
1570 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1571 else: __HO_shape = self.__HO["Direct"].shape()
1572 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1574 if len(self.__EM) == 0: __EM_shape = (0,0)
1575 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1576 elif hasattr(self.__EM["Direct"],"shape"):
1577 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1578 else: __EM_shape = self.__EM["Direct"].shape()
1579 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1581 if len(self.__CM) == 0: __CM_shape = (0,0)
1582 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1583 elif hasattr(self.__CM["Direct"],"shape"):
1584 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1585 else: __CM_shape = self.__CM["Direct"].shape()
1586 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1588 # Vérification des conditions
1589 # ---------------------------
1590 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1591 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1592 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1593 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1595 if not( min(__B_shape) == max(__B_shape) ):
1596 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1597 if not( min(__R_shape) == max(__R_shape) ):
1598 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1599 if not( min(__Q_shape) == max(__Q_shape) ):
1600 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1601 if not( min(__EM_shape) == max(__EM_shape) ):
1602 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1604 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1606 "Shape characteristic of observation operator (H)"+\
1607 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1608 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1610 "Shape characteristic of observation operator (H)"+\
1611 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1612 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1614 "Shape characteristic of observation operator (H)"+\
1615 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1616 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1618 "Shape characteristic of observation operator (H)"+\
1619 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1621 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1622 if self.__algorithmName in ["EnsembleBlue",]:
1623 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1624 self.__Xb = Persistence.OneVector("Background")
1625 for member in asPersistentVector:
1626 self.__Xb.store( numpy.asarray(member, dtype=float) )
1627 __Xb_shape = min(__B_shape)
1630 "Shape characteristic of a priori errors covariance matrix (B)"+\
1631 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1633 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1635 "Shape characteristic of observation errors covariance matrix (R)"+\
1636 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1638 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1640 "Shape characteristic of evolution model (EM)"+\
1641 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1643 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1645 "Shape characteristic of control model (CM)"+\
1646 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1648 if ("Bounds" in self.__P) \
1649 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1650 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1651 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1652 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1654 if ("StateBoundsForQuantiles" in self.__P) \
1655 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1656 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1657 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1658 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1662 # ==============================================================================
1663 class RegulationAndParameters(object):
1665 Classe générale d'interface d'action pour la régulation et ses paramètres
1667 __slots__ = ("__name", "__P")
1670 name = "GenericRegulation",
1677 self.__name = str(name)
1680 if asAlgorithm is None and asScript is not None:
1681 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1683 __Algo = asAlgorithm
1685 if asDict is None and asScript is not None:
1686 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1690 if __Dict is not None:
1691 self.__P.update( dict(__Dict) )
1693 if __Algo is not None:
1694 self.__P.update( {"Algorithm":str(__Algo)} )
1696 def get(self, key = None):
1697 "Vérifie l'existence d'une clé de variable ou de paramètres"
1699 return self.__P[key]
1703 # ==============================================================================
1704 class DataObserver(object):
1706 Classe générale d'interface de type observer
1708 __slots__ = ("__name", "__V", "__O", "__I")
1711 name = "GenericObserver",
1723 self.__name = str(name)
1728 if onVariable is None:
1729 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1730 elif type(onVariable) in (tuple, list):
1731 self.__V = tuple(map( str, onVariable ))
1732 if withInfo is None:
1735 self.__I = (str(withInfo),)*len(self.__V)
1736 elif isinstance(onVariable, str):
1737 self.__V = (onVariable,)
1738 if withInfo is None:
1739 self.__I = (onVariable,)
1741 self.__I = (str(withInfo),)
1743 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1745 if asObsObject is not None:
1746 self.__O = asObsObject
1748 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1749 __Function = Observer2Func(__FunctionText)
1750 self.__O = __Function.getfunc()
1752 for k in range(len(self.__V)):
1755 if ename not in withAlgo:
1756 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1758 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1761 "x.__repr__() <==> repr(x)"
1762 return repr(self.__V)+"\n"+repr(self.__O)
1765 "x.__str__() <==> str(x)"
1766 return str(self.__V)+"\n"+str(self.__O)
1768 # ==============================================================================
1769 class UserScript(object):
1771 Classe générale d'interface de type texte de script utilisateur
1773 __slots__ = ("__name", "__F")
1776 name = "GenericUserScript",
1783 self.__name = str(name)
1785 if asString is not None:
1787 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1788 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1789 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1790 self.__F = Templates.ObserverTemplates[asTemplate]
1791 elif asScript is not None:
1792 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1797 "x.__repr__() <==> repr(x)"
1798 return repr(self.__F)
1801 "x.__str__() <==> str(x)"
1802 return str(self.__F)
1804 # ==============================================================================
1805 class ExternalParameters(object):
1807 Classe générale d'interface pour le stockage des paramètres externes
1809 __slots__ = ("__name", "__P")
1812 name = "GenericExternalParameters",
1818 self.__name = str(name)
1821 self.updateParameters( asDict, asScript )
1823 def updateParameters(self,
1827 "Mise à jour des paramètres"
1828 if asDict is None and asScript is not None:
1829 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1833 if __Dict is not None:
1834 self.__P.update( dict(__Dict) )
1836 def get(self, key = None):
1838 return self.__P[key]
1840 return list(self.__P.keys())
1843 return list(self.__P.keys())
1845 def pop(self, k, d):
1846 return self.__P.pop(k, d)
1849 return self.__P.items()
    def __contains__(self, key=None) -> bool:
        "D.__contains__(k) -> True if D has a key k, else False"
        # Membership test simply delegates to the internal parameter dictionary.
        return key in self.__P
1855 # ==============================================================================
1856 class State(object):
1858 Classe générale d'interface de type état
1861 "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1866 name = "GenericVector",
1868 asPersistentVector = None,
1874 toBeChecked = False,
1877 Permet de définir un vecteur :
1878 - asVector : entrée des données, comme un vecteur compatible avec le
1879 constructeur de numpy.matrix, ou "True" si entrée par script.
1880 - asPersistentVector : entrée des données, comme une série de vecteurs
1881 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1882 type Persistence, ou "True" si entrée par script.
1883 - asScript : si un script valide est donné contenant une variable
1884 nommée "name", la variable est de type "asVector" (par défaut) ou
1885 "asPersistentVector" selon que l'une de ces variables est placée à
1887 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1888 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1889 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1890 nommée "name"), on récupère les colonnes et on les range ligne après
1891 ligne (colMajor=False, par défaut) ou colonne après colonne
1892 (colMajor=True). La variable résultante est de type "asVector" (par
1893 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1896 self.__name = str(name)
1897 self.__check = bool(toBeChecked)
1901 self.__is_vector = False
1902 self.__is_series = False
1904 if asScript is not None:
1905 __Vector, __Series = None, None
1906 if asPersistentVector:
1907 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1909 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1910 elif asDataFile is not None:
1911 __Vector, __Series = None, None
1912 if asPersistentVector:
1913 if colNames is not None:
1914 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1916 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1917 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1918 __Series = numpy.transpose(__Series)
1919 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1920 __Series = numpy.transpose(__Series)
1922 if colNames is not None:
1923 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1925 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1927 __Vector = numpy.ravel(__Vector, order = "F")
1929 __Vector = numpy.ravel(__Vector, order = "C")
1931 __Vector, __Series = asVector, asPersistentVector
1933 if __Vector is not None:
1934 self.__is_vector = True
1935 if isinstance(__Vector, str):
1936 __Vector = PlatformInfo.strvect2liststr( __Vector )
1937 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1938 self.shape = self.__V.shape
1939 self.size = self.__V.size
1940 elif __Series is not None:
1941 self.__is_series = True
1942 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1943 self.__V = Persistence.OneVector(self.__name)
1944 if isinstance(__Series, str):
1945 __Series = PlatformInfo.strmatrix2liststr(__Series)
1946 for member in __Series:
1947 if isinstance(member, str):
1948 member = PlatformInfo.strvect2liststr( member )
1949 self.__V.store(numpy.asarray( member, dtype=float ))
1952 if isinstance(self.__V.shape, (tuple, list)):
1953 self.shape = self.__V.shape
1955 self.shape = self.__V.shape()
1956 if len(self.shape) == 1:
1957 self.shape = (self.shape[0],1)
1958 self.size = self.shape[0] * self.shape[1]
1961 "The %s object is improperly defined or undefined,"%self.__name+\
1962 " it requires at minima either a vector, a list/tuple of"+\
1963 " vectors or a persistent object. Please check your vector input.")
1965 if scheduledBy is not None:
1966 self.__T = scheduledBy
1968 def getO(self, withScheduler=False):
1970 return self.__V, self.__T
1971 elif self.__T is None:
1977 "Vérification du type interne"
1978 return self.__is_vector
1981 "Vérification du type interne"
1982 return self.__is_series
1985 "x.__repr__() <==> repr(x)"
1986 return repr(self.__V)
1989 "x.__str__() <==> str(x)"
1990 return str(self.__V)
1992 # ==============================================================================
1993 class Covariance(object):
1995 Classe générale d'interface de type covariance
1998 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
1999 "__is_object", "shape", "size",
2003 name = "GenericCovariance",
2004 asCovariance = None,
2005 asEyeByScalar = None,
2006 asEyeByVector = None,
2009 toBeChecked = False,
2012 Permet de définir une covariance :
2013 - asCovariance : entrée des données, comme une matrice compatible avec
2014 le constructeur de numpy.matrix
2015 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
2016 multiplicatif d'une matrice de corrélation identité, aucune matrice
2017 n'étant donc explicitement à donner
2018 - asEyeByVector : entrée des données comme un seul vecteur de variance,
2019 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
2020 n'étant donc explicitement à donner
2021 - asCovObject : entrée des données comme un objet python, qui a les
2022 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
2023 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
2024 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2025 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
2026 pleine doit être vérifié
2028 self.__name = str(name)
2029 self.__check = bool(toBeChecked)
2032 self.__is_scalar = False
2033 self.__is_vector = False
2034 self.__is_matrix = False
2035 self.__is_object = False
2037 if asScript is not None:
2038 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2040 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2042 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2044 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2046 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2048 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
2050 if __Scalar is not None:
2051 if isinstance(__Scalar, str):
2052 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2053 if len(__Scalar) > 0: __Scalar = __Scalar[0]
2054 if numpy.array(__Scalar).size != 1:
2056 " The diagonal multiplier given to define a sparse matrix is"+\
2057 " not a unique scalar value.\n Its actual measured size is"+\
2058 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2059 self.__is_scalar = True
2060 self.__C = numpy.abs( float(__Scalar) )
2063 elif __Vector is not None:
2064 if isinstance(__Vector, str):
2065 __Vector = PlatformInfo.strvect2liststr( __Vector )
2066 self.__is_vector = True
2067 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2068 self.shape = (self.__C.size,self.__C.size)
2069 self.size = self.__C.size**2
2070 elif __Matrix is not None:
2071 self.__is_matrix = True
2072 self.__C = numpy.matrix( __Matrix, float )
2073 self.shape = self.__C.shape
2074 self.size = self.__C.size
2075 elif __Object is not None:
2076 self.__is_object = True
2078 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2079 if not hasattr(self.__C,at):
2080 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2081 if hasattr(self.__C,"shape"):
2082 self.shape = self.__C.shape
2085 if hasattr(self.__C,"size"):
2086 self.size = self.__C.size
2094 def __validate(self):
2096 if self.__C is None:
2097 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2098 if self.ismatrix() and min(self.shape) != max(self.shape):
2099 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2100 if self.isobject() and min(self.shape) != max(self.shape):
2101 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2102 if self.isscalar() and self.__C <= 0:
2103 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2104 if self.isvector() and (self.__C <= 0).any():
2105 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
2106 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2108 numpy.linalg.cholesky( self.__C )
2110 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2111 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2115 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2118 "Vérification du type interne"
2119 return self.__is_scalar
2122 "Vérification du type interne"
2123 return self.__is_vector
2126 "Vérification du type interne"
2127 return self.__is_matrix
2130 "Vérification du type interne"
2131 return self.__is_object
2136 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2137 elif self.isvector():
2138 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2139 elif self.isscalar():
2140 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2141 elif self.isobject() and hasattr(self.__C,"getI"):
2142 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2144 return None # Indispensable
2149 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2150 elif self.isvector():
2151 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2152 elif self.isscalar():
2153 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2154 elif self.isobject() and hasattr(self.__C,"getT"):
2155 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2157 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2160 "Décomposition de Cholesky"
2162 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2163 elif self.isvector():
2164 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2165 elif self.isscalar():
2166 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2167 elif self.isobject() and hasattr(self.__C,"cholesky"):
2168 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2170 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2172 def choleskyI(self):
2173 "Inversion de la décomposition de Cholesky"
2175 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2176 elif self.isvector():
2177 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2178 elif self.isscalar():
2179 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2180 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2181 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2183 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2186 "Racine carrée matricielle"
2189 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2190 elif self.isvector():
2191 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2192 elif self.isscalar():
2193 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2194 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2195 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2197 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2200 "Inversion de la racine carrée matricielle"
2203 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2204 elif self.isvector():
2205 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2206 elif self.isscalar():
2207 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2208 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2209 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2211 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2213 def diag(self, msize=None):
2214 "Diagonale de la matrice"
2216 return numpy.diag(self.__C)
2217 elif self.isvector():
2219 elif self.isscalar():
2221 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2223 return self.__C * numpy.ones(int(msize))
2224 elif self.isobject() and hasattr(self.__C,"diag"):
2225 return self.__C.diag()
2227 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2229 def trace(self, msize=None):
2230 "Trace de la matrice"
2232 return numpy.trace(self.__C)
2233 elif self.isvector():
2234 return float(numpy.sum(self.__C))
2235 elif self.isscalar():
2237 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2239 return self.__C * int(msize)
2240 elif self.isobject():
2241 return self.__C.trace()
2243 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2245 def asfullmatrix(self, msize=None):
2248 return numpy.asarray(self.__C, dtype=float)
2249 elif self.isvector():
2250 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2251 elif self.isscalar():
2253 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2255 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2256 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2257 return self.__C.asfullmatrix()
2259 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2261 def assparsematrix(self):
2269 "x.__repr__() <==> repr(x)"
2270 return repr(self.__C)
2273 "x.__str__() <==> str(x)"
2274 return str(self.__C)
2276 def __add__(self, other):
2277 "x.__add__(y) <==> x+y"
2278 if self.ismatrix() or self.isobject():
2279 return self.__C + numpy.asmatrix(other)
2280 elif self.isvector() or self.isscalar():
2281 _A = numpy.asarray(other)
2282 if len(_A.shape) == 1:
2283 _A.reshape((-1,1))[::2] += self.__C
2285 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2286 return numpy.asmatrix(_A)
2288 def __radd__(self, other):
2289 "x.__radd__(y) <==> y+x"
2290 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2292 def __sub__(self, other):
2293 "x.__sub__(y) <==> x-y"
2294 if self.ismatrix() or self.isobject():
2295 return self.__C - numpy.asmatrix(other)
2296 elif self.isvector() or self.isscalar():
2297 _A = numpy.asarray(other)
2298 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2299 return numpy.asmatrix(_A)
2301 def __rsub__(self, other):
2302 "x.__rsub__(y) <==> y-x"
2303 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2306 "x.__neg__() <==> -x"
2309 def __matmul__(self, other):
2310 "x.__mul__(y) <==> x@y"
2311 if self.ismatrix() and isinstance(other, (int, float)):
2312 return numpy.asarray(self.__C) * other
2313 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2314 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2315 return numpy.ravel(self.__C @ numpy.ravel(other))
2316 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2317 return numpy.asarray(self.__C) @ numpy.asarray(other)
2319 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2320 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2321 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2322 return numpy.ravel(self.__C) * numpy.ravel(other)
2323 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2324 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2326 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2327 elif self.isscalar() and isinstance(other,numpy.matrix):
2328 return numpy.asarray(self.__C * other)
2329 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2330 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2331 return self.__C * numpy.ravel(other)
2333 return self.__C * numpy.asarray(other)
2334 elif self.isobject():
2335 return self.__C.__matmul__(other)
2337 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2339 def __mul__(self, other):
2340 "x.__mul__(y) <==> x*y"
2341 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2342 return self.__C * other
2343 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2344 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2345 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2346 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2347 return self.__C * numpy.asmatrix(other)
2350 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2351 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2352 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2353 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2354 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2355 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2358 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2359 elif self.isscalar() and isinstance(other,numpy.matrix):
2360 return self.__C * other
2361 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2362 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2363 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2365 return self.__C * numpy.asmatrix(other)
2366 elif self.isobject():
2367 return self.__C.__mul__(other)
2369 raise NotImplementedError(
2370 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2372 def __rmatmul__(self, other):
2373 "x.__rmul__(y) <==> y@x"
2374 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2375 return other * self.__C
2376 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2377 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2378 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2379 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2380 return numpy.asmatrix(other) * self.__C
2383 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2384 elif self.isvector() and isinstance(other,numpy.matrix):
2385 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2386 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2387 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2388 return numpy.asmatrix(numpy.array(other) * self.__C)
2391 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2392 elif self.isscalar() and isinstance(other,numpy.matrix):
2393 return other * self.__C
2394 elif self.isobject():
2395 return self.__C.__rmatmul__(other)
2397 raise NotImplementedError(
2398 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2400 def __rmul__(self, other):
2401 "x.__rmul__(y) <==> y*x"
2402 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2403 return other * self.__C
2404 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2405 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2406 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2407 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2408 return numpy.asmatrix(other) * self.__C
2411 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2412 elif self.isvector() and isinstance(other,numpy.matrix):
2413 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2414 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2415 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2416 return numpy.asmatrix(numpy.array(other) * self.__C)
2419 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2420 elif self.isscalar() and isinstance(other,numpy.matrix):
2421 return other * self.__C
2422 elif self.isscalar() and isinstance(other,float):
2423 return other * self.__C
2424 elif self.isobject():
2425 return self.__C.__rmul__(other)
2427 raise NotImplementedError(
2428 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2431 "x.__len__() <==> len(x)"
2432 return self.shape[0]
2434 # ==============================================================================
2435 class Observer2Func(object):
2437 Création d'une fonction d'observateur a partir de son texte
2439 __slots__ = ("__corps")
    def __init__(self, corps=""):
        # Keep the raw observer source text; presumably compiled/executed by a
        # sibling method whose body is not fully visible here — TODO confirm.
        self.__corps = corps
2443 def func(self,var,info):
2444 "Fonction d'observation"
2447 "Restitution du pointeur de fonction dans l'objet"
2450 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands used to build a calculation case, and
    dump/load that record through pluggable viewer/loader classes.
    """
    __slots__ = (
        "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
        "__loaders",
    )

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        """
        __name       : str  -- case name used by the formatters.
        __objname    : str  -- object name used in generated scripts.
        __addViewers : dict -- extra {format: viewer class} entries (optional).
        __addLoaders : dict -- extra {format: loader class} entries (optional).
        """
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []      # ordered list of registered command tuples
        self.__switchoff = False   # when True, register() ignores new commands
        # Built-in output formats for dump()
        self.__viewers = {
            "TUI": Interfaces._TUIViewer,
            "SCD": Interfaces._SCDViewer,
            "YACS": Interfaces._YACSViewer,
            "SimpleReportInRst": Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml": Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt": Interfaces._SimpleReportInPlainTxtViewer,
        }
        # Built-in input formats for load()
        self.__loaders = {
            "TUI": Interfaces._TUIViewer,
            "COM": Interfaces._COMViewer,
        }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        """
        Record one individual command.

        __command   : str  -- command name.
        __keys      : list -- argument names ("self" is filtered out).
        __local     : dict -- argument values.
        __pre       : any  -- optional prefix information.
        __switchoff : bool -- when True, suspend recording after this command.
        """
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # Work on a copy so the caller's key list is not mutated
            __keys = list(__keys)
            if "self" in __keys:
                __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            # A normal command re-enables recording
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Restitution normalisée des commandes"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Chargement normalisé des commandes"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2508 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction      = lambda x: x,
        _mpEnabled      = False,
        _mpWorkers      = None ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the given function, optionally evaluated in parallel.

    __xserie        : iterable -- evaluation points.
    _extraArguments : None | list/tuple/map | dict -- extra positional or
                      keyword arguments forwarded to each call (mono-process
                      mode only).
    _sFunction      : callable -- function applied to each point.
    _mpEnabled      : bool -- request multiprocessing evaluation.
    _mpWorkers      : int | None -- pool size; None or <1 lets the pool
                      choose the default (os.cpu_count()).
    """
    # Input must be an iterable of evaluation points
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            __mpWorkers = None  # let multiprocessing pick the worker count
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            # Fall back silently to mono-process evaluation
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = __xserie
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        # Mono-process evaluation; dispatch on the extra-argument container
        # type (the "is not None" re-tests were redundant after the first
        # branch and have been removed).
        if _extraArguments is None:
            __multiHX = [ _sFunction( __xvalue ) for __xvalue in __xserie ]
        elif isinstance(_extraArguments, (list, tuple, map)):
            __multiHX = [ _sFunction( __xvalue, *_extraArguments ) for __xvalue in __xserie ]
        elif isinstance(_extraArguments, dict):
            __multiHX = [ _sFunction( __xvalue, **_extraArguments ) for __xvalue in __xserie ]
        else:
            raise TypeError("MultiFonction extra arguments unknown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
2566 # ==============================================================================
# Self-diagnostic banner when the module is run directly (no-op on import).
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC\n")