1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2024 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence
38 from daCore import PlatformInfo
39 from daCore import Interfaces
40 from daCore import Templates
42 # ==============================================================================
class CacheManager(object):
    # General management class for a cache of computations: stores
    # (point, value) pairs per operator name and detects redundant
    # evaluations through a norm-based tolerance test.
    # NOTE(review): this source view is elided (original line numbers were
    # fused into the text and several lines are missing); only the visible
    # tokens are kept below, with fragment markers where code is missing.
    #
    # Fragment of __slots__ (opening line elided in this view):
    "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",

    # Tail of the __init__ signature (the def line is elided):
    #   toleranceInRedundancy: relative tolerance of the redundancy test
    #   lengthOfRedundancy: maximum cache length (-1 = auto-sized on first store)
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy = -1 ):
        # The tolerance characteristics can be modified at creation time.
        self.__tolerBP = float(toleranceInRedundancy)
        self.__lengthOR = int(lengthOfRedundancy)
        self.__initlnOR = self.__lengthOR

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks the existence of a stored computation matching the value"
        # Scan the cache backwards (most recent entries first). Each cached
        # entry is a tuple: (point, value, norm, operator name). The bodies
        # of the rejection branches (continue/break/return) are elided in
        # this view.
        for i in range(min(len(self.__listOPCV), self.__lengthOR) - 1, -1, -1):
            if not hasattr(xValue, 'size'):
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            # Cheap first-component screening before the full norm test
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            # Full redundancy test: distance to the stored point is below
            # tolerance scaled by the stored norm
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx matching the value x"
        if self.__lengthOR < 0:
            # Auto-size the cache on first use, based on the state size
            # (bounded to keep memory usage limited)
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames:  # Extends the list if the name is new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Evict oldest entries once over capacity (FIFO eviction)
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append((
            copy.copy(numpy.ravel(xValue)),  # 0 Previous point
            copy.copy(HxValue),  # 1 Previous value
            numpy.linalg.norm(xValue),  # 2 Norm
            str(oName),  # 3 Operator name

        # Fragment of disable() (def line elided): freeze current length
        # and mark the cache as disabled
        self.__initlnOR = self.__lengthOR
        self.__enabled = False

        # Fragment of enable() (def line elided): restore the initial
        # cache length and mark the cache as enabled
        self.__lengthOR = self.__initlnOR
        self.__enabled = True
120 # ==============================================================================
class Operator(object):
    # General interface class for a simple operator, held either as a
    # matrix or as a (multi-)function, with call counting and an optional
    # shared computation cache (Operator.CM).
    # NOTE(review): this source view is elided (original line numbers fused
    # into the text, several lines missing); visible tokens are unchanged,
    # fragment markers indicate missing code.
    #
    # Fragment of __slots__ (opening line elided):
    "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
    "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
    "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",

    # Tail of the __init__ signature (def line and some keywords elided):
                 name = "GenericOperator",
                 avoidingRedundancy = True,
                 reducingMemoryUse = False,
                 inputAsMultiFunction = False,
                 enableMultiProcess = False,
                 extraArguments = None ):
        # (docstring translated) An object of this type is built by giving,
        # through one of the two keywords, either a Python function or
        # multi-function, or a matrix. Arguments:
        # - name : operator name
        # - fromMethod : Python function argument
        # - fromMatrix : argument suitable for the numpy.array/matrix constructor
        # - avoidingRedundancy : boolean avoiding (or not) redundant computations
        # - reducingMemoryUse : boolean forcing (or not) computations that
        #   use less memory
        # - inputAsMultiFunction : boolean indicating a function explicitly
        #   defined (or not) as a multi-function
        # - extraArguments : extra arguments passed to the base function
        #   and its derivatives (tuple or dictionary)
        self.__name = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Native multi-function: used directly
            self.__Method = fromMethod  # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Mono-function: wrapped so that it behaves as a multi-function
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Matrix form, possibly given as a string to parse first
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Disables the shared computation cache
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): only the disable() call is visible here; the
        # enabling branch appears to be elided in this view — confirm
        # against the full source before relying on this method.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        # (docstring translated) Returns the result of applying the
        # operator to a series of xValue arguments. This method simply
        # applies the operator, each argument being assumed to already
        # have the proper type.
        # Series arguments:
        # - xValue : argument suitable for applying the operator
        # - HValue : precomputed value of the operator at this point
        # - argsAsSerie : indicates whether the arguments are mono or multi-valued
        # NOTE(review): the argsAsSerie normalization lines are elided here.
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )

        if _HValue is not None:
            # Precomputed values were provided: register them directly
            # (the cache-store guard line is elided)
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                Operator.CM.storeValueInX(_xValue[i], _HxValue[-1], self.__name)
            # Evaluation path (enclosing else and list initializations elided)
            for i, xv in enumerate(_xValue):
                # Cache lookup (guard on self.__avoidRC elided)
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv, self.__name)
                __alreadyCalculated = False

                if __alreadyCalculated:
                    self.__addOneCacheCall()
                if self.__Matrix is not None:
                    # Linear case: direct matrix-vector product
                    self.__addOneMatrixCall()
                    _hv = self.__Matrix @ numpy.ravel(xv)
                    self.__addOneMethodCall()
                _HxValue.append( _hv )

            # Batch evaluation of all points not found in the cache
            if len(_xserie) > 0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie )  # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs )  # MF computation
                if not hasattr(_hserie, "pop"):
                    "The user input multi-function doesn't seem to return a" + \
                    " result sequence, behaving like a mono-function. It has" + \
                    Operator.CM.storeValueInX(_xv, _hv, self.__name)
        if returnSerieAsArrayMatrix:
            # Pack the series column-wise into a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue  # noqa: E701
        else: return _HxValue[-1]  # noqa: E241,E272,E701

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        # (docstring translated) Returns the result of applying the
        # operator to (xValue, uValue) pairs. This method simply applies
        # the operator, its argument being assumed to already have the
        # proper type. If uValue is None, the operator is assumed to apply
        # to xValue only.
        # - paires : the paired arguments are:
        #   - xValue : X argument suitable for applying the operator
        #   - uValue : U argument suitable for applying the operator
        # - argsAsSerie : indicates whether the argument is mono or multi-valued
        if argsAsSerie: _xuValue = paires  # noqa: E701
        else: _xuValue = (paires,)  # noqa: E241,E272,E701
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )

        if self.__Matrix is not None:
            # Linear case: U is ignored, X goes through the matrix
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Functional case (enclosing else and list init elided): pass
            # the pair when U is present, otherwise X alone
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs )  # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs )  # MF computation

        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue  # noqa: E701
        else: return _HxValue[-1]  # noqa: E241,E272,E701

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        # (docstring translated) Returns the result of applying the
        # operator to a series of xValue arguments, knowing that the
        # operator is valid at xNominal. This method simply applies the
        # operator, its argument being assumed to already have the proper
        # type. If the operator is linear (a matrix), it is valid at any
        # nominal point and xNominal may be anything. There is a single
        # pair by default, and argsAsSerie indicates a multi-pair argument.
        # - paires : the paired arguments are:
        #   - xNominal : series of arguments giving the point where the
        #     operator is built, to be applied afterwards
        #   - xValue : series of arguments suitable for applying the operator
        # - argsAsSerie : indicates whether the argument is mono or multi-valued
        if argsAsSerie: _nxValue = paires  # noqa: E701
        else: _nxValue = (paires,)  # noqa: E241,E272,E701
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )

        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Functional case (enclosing else elided)
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue )  # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs )  # MF computation

        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue  # noqa: E701
        else: return _HxValue[-1]  # noqa: E241,E272,E701

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        # (docstring translated) Returns the operator in matrix form
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm, str) or ValueForMethodForm != "UnknownVoidValue":  # Do not use "None"
            # Functional operator: an operating point is required
            # (series/mono dispatch guard elided)
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( self.__Method(((_vfmf, None),)) )
            self.__addOneMethodCall()
            mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

        if argsAsSerie: return mValue  # noqa: E701
        else: return mValue[-1]  # noqa: E241,E272,E701

        # Fragment of shape() (def line elided):
        # (docstring translated) Returns the numpy shape if the operator is
        # available in matrix form
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        # (docstring translated) Returns the numbers of evaluations of the
        # operator. Tuple fragment below (opening line elided): local
        # counters first, then class-level (global) counters.
            self.__NbCallsAsMatrix + self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix + Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls  # noqa: E701
        else: return __nbcalls[which]  # noqa: E241,E272,E701

    def __addOneMatrixCall(self):
        "Counts one call"
        self.__NbCallsAsMatrix += 1  # Local count
        Operator.NbCallsAsMatrix += 1  # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one call"
        self.__NbCallsAsMethod += nb  # Local count
        Operator.NbCallsAsMethod += nb  # Global count

    def __addOneCacheCall(self):
        "Counts one call"
        self.__NbCallsOfCached += 1  # Local count
        Operator.NbCallsOfCached += 1  # Global count
423 # ==============================================================================
class FullOperator(object):
    # General interface class for a full operator
    # (Direct, Tangent-Linear, Adjoint), building the three Operator
    # objects from a matrix, a single function (with approximated
    # derivatives), or a triplet of functions.
    # NOTE(review): this source view is elided (original line numbers fused
    # into the text, several lines missing); visible tokens are unchanged,
    # fragment markers indicate missing code.
    #
    # Fragment of __slots__ (opening line elided):
    "__name", "__check", "__extraArgs", "__FO", "__T",

    # Tail of the __init__ signature (def line and some keywords elided):
                 name = "GenericFullOperator",
                 asOneFunction = None,  # 1 Function
                 asThreeFunctions = None,  # 3 Functions in a dictionary
                 asScript = None,  # 1 or 3 Function(s) by script
                 asDict = None,  # Parameters
                 extraArguments = None,
                 performancePrf = None,
                 inputAsMF = False,  # Function(s) as Multi-Functions
                 toBeChecked = False ):
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments

        # Merge user parameters (initialization of __Parameters elided)
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters:  # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False

        if asScript is not None:
            # Operator(s) given by script: load the matrix or the
            # function(s) from the script file.
            # NOTE(review): the branch guards distinguishing the matrix /
            # one-function / three-functions cases are elided here.
            __Matrix, __Function = None, None
            __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives": True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent": Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint": Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Operator(s) given directly (enclosing else elided)
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct": asOneFunction }
                __Function.update({"useApproximatedDerivatives": True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # A full Tangent/Adjoint pair must be given explicitly,
                # without a request for approximated derivatives
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives": True})
                    "The functions has to be given in a dictionnary which have either" + \
                    " 1 key (\"Direct\") or" + \
                    " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent operator for the direct one
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)

        # Normalize the appliedInX argument to a dictionary keyed by "HXb"
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb": appliedInX}

        if scheduledBy is not None:
            self.__T = scheduledBy

        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Case 1: Direct function only — build Tangent and Adjoint by
            # finite-difference approximation. First fill in the defaults.
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False  # noqa: E272,E701
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01  # noqa: E272,E701
            if "withdX" not in __Function: __Function["withdX"] = None  # noqa: E272,E701
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM  # noqa: E272,E701
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC  # noqa: E272,E701
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18  # noqa: E272,E701
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1  # noqa: E272,E701
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None  # noqa: E272,E701
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF  # noqa: E272,E701
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy = __Function["withLengthOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator(
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name + "Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name + "Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["DifferentialIncrement"] = __Function["DifferentialIncrement"]
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Case 2: the three functions are all given explicitly
            self.__FO["Direct"] = Operator(
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name + "Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name + "Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["DifferentialIncrement"] = None
        elif asMatrix is not None:
            # Case 3: linear operator as a matrix; Tangent is the matrix
            # itself and Adjoint is its transpose
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name + "Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name + "Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )

            self.__FO["DifferentialIncrement"] = None
            # Error message fragment (enclosing else/raise line elided)
            "The %s object is improperly defined or undefined,"%self.__name + \
            " it requires at minima either a matrix, a Direct operator for" + \
            " approximate derivatives or a Tangent/Adjoint operators pair." + \
            " Please check your operator input.")

        # Store the operating point(s), reshaped as column vectors
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1, 1))
            self.__FO["AppliedInX"] = None

    def nbcalls(self, whot=None, which=None):
        # (docstring translated) Returns the numbers of evaluations of the
        # operator, per sub-operator kind (initialization of __nbcalls elided)
        for otype in ["Direct", "Tangent", "Adjoint"]:
            if otype in self.__FO:
                __nbcalls[otype] = self.__FO[otype].nbcalls()
        if whot in __nbcalls and which is not None:
            return __nbcalls[whot][which]

        # Fragment of __repr__ (def line elided)
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        # Fragment of __str__ (def line elided)
        "x.__str__() <==> str(x)"
        return str(self.__FO)
678 # ==============================================================================
679 class Algorithm(object):
681 Classe générale d'interface de type algorithme
683 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
684 d'assimilation, en fournissant un container (dictionnaire) de variables
685 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
687 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
690 "_name", "_parameters", "__internal_state", "__required_parameters",
691 "_m", "__variable_names_not_public", "__canonical_parameter_name",
692 "__canonical_stored_name", "__replace_by_the_new_name",
696 def __init__(self, name):
698 L'initialisation présente permet de fabriquer des variables de stockage
699 disponibles de manière générique dans les algorithmes élémentaires. Ces
700 variables de stockage sont ensuite conservées dans un dictionnaire
701 interne à l'objet, mais auquel on accède par la méthode "get".
703 Les variables prévues sont :
704 - APosterioriCorrelations : matrice de corrélations de la matrice A
705 - APosterioriCovariance : matrice de covariances a posteriori : A
706 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
707 - APosterioriVariances : vecteur des variances de la matrice A
708 - Analysis : vecteur d'analyse : Xa
709 - BMA : Background moins Analysis : Xa - Xb
710 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
711 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
712 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
713 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
714 - CostFunctionJo : partie observations de la fonction-coût : Jo
715 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
716 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
717 - CurrentOptimum : état optimal courant lors d'itérations
718 - CurrentState : état courant lors d'itérations
719 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
720 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
721 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
722 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
723 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
724 - ForecastState : état prédit courant lors d'itérations
725 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
726 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
727 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
728 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
729 - Innovation : l'innovation : d = Y - H(X)
730 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
731 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
732 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
733 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
734 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
735 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
736 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
737 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
738 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
739 - KalmanGainAtOptimum : gain de Kalman à l'optimum
740 - MahalanobisConsistency : indicateur de consistance des covariances
741 - OMA : Observation moins Analyse : Y - Xa
742 - OMB : Observation moins Background : Y - Xb
743 - ReducedCoordinates : coordonnées dans la base réduite
744 - Residu : dans le cas des algorithmes de vérification
745 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
746 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
747 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
748 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
749 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
750 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
751 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
752 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
753 - SingularValues : valeurs singulières provenant d'une décomposition SVD
754 On peut rajouter des variables à stocker dans l'initialisation de
755 l'algorithme élémentaire qui va hériter de cette classe
757 logging.debug("%s Initialisation", str(name))
758 self._m = PlatformInfo.SystemUsage()
760 self._name = str( name )
761 self._parameters = {"StoreSupplementaryCalculations": []}
762 self.__internal_state = {}
763 self.__required_parameters = {}
764 self.__required_inputs = {
765 "RequiredInputValues": {"mandatory": (), "optional": ()},
766 "AttributesTags": [],
767 "AttributesFeatures": [],
769 self.__variable_names_not_public = {"nextStep": False} # Duplication dans AlgorithmAndParameters
770 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
771 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
772 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
774 self.StoredVariables = {}
775 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
776 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
777 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
778 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
779 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
780 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
781 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
782 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
783 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
784 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
785 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
786 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
787 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
788 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
789 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
790 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
791 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
792 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrice(name = "EnsembleOfSimulations")
793 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrice(name = "EnsembleOfSnapshots")
794 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrice(name = "EnsembleOfStates")
795 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
796 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
797 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
798 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
799 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
800 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
801 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
802 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
803 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
804 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
805 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
806 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
807 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
808 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
809 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
810 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
811 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
812 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
813 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
814 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
815 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
816 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
817 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
818 self.StoredVariables["ReducedBasisMus"] = Persistence.OneVector(name = "ReducedBasisMus")
819 self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
820 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
821 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
822 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
823 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
824 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
825 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
826 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
827 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
828 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
829 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
830 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
831 self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
833 for k in self.StoredVariables:
834 self.__canonical_stored_name[k.lower()] = k
836 for k, v in self.__variable_names_not_public.items():
837 self.__canonical_parameter_name[k.lower()] = k
838 self.__canonical_parameter_name["algorithm"] = "Algorithm"
839 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        # Common pre-processing executed before every algorithm run: stores
        # the user parameters, checks the presence of the declared required
        # inputs (vectors, error covariance matrices, operators), then
        # normalizes the bounds and the forced initialization point.
        # NOTE(review): several lines of this method (docstring, a few guard
        # statements and the opening "logging.debug(" of multi-line calls)
        # are elided from this view — structure below follows the visible code.
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Mise à jour des paramètres internes avec le contenu de Parameters, en
        # reprenant les valeurs par défauts pour toutes celles non définies
        self.__setParameters(Parameters, reset=True)  # Copie
        for k, v in self.__variable_names_not_public.items():
            # Complete the internal (non public) variables with their defaults
            if k not in self._parameters:
                self.__setParameters( {k: v} )

        def __test_vvalue(argument, variable, argname, symbol=None):
            "Corrections et compléments des vecteurs"
            # A missing mandatory vector raises; other cases only log.
            # NOTE(review): the enclosing "argument is None" guard is elided here.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name, argname, symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name, argname, symbol))
            logging.debug("%s %s vector %s is not set, but is not required."%(self._name, argname, symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                # (message of an elided multi-line logging.debug call)
                "%s %s vector %s is required and set, and its full size is %i." \
                    % (self._name, argname, symbol, numpy.array(argument).size))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                "%s %s vector %s is optional and set, and its full size is %i." \
                    % (self._name, argname, symbol, numpy.array(argument).size))
            "%s %s vector %s is set although neither required nor optional, and its full size is %i." \
                % (self._name, argname, symbol, numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )

        def __test_cvalue(argument, variable, argname, symbol=None):
            "Corrections et compléments des covariances"
            # Same presence check, applied to the covariance matrices B, R, Q.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name, argname, symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name, argname, symbol))
            logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name, argname, symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s error covariance matrix %s is required and set."%(self._name, argname, symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name, argname, symbol))
            # (message of an elided multi-line logging.debug call)
            "%s %s error covariance matrix %s is set although neither required nor optional." \
                % (self._name, argname, symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )

        def __test_ovalue(argument, variable, argname, symbol=None):
            "Corrections et compléments des opérateurs"
            # An operator is considered unset when None or an empty dict.
            if argument is None or (isinstance(argument, dict) and len(argument) == 0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name, argname, symbol))
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name, argname, symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s operator %s is required and set."%(self._name, argname, symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is optional and set."%(self._name, argname, symbol))
            logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name, argname, symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )

        # Corrections et compléments des bornes
        # "Bounds" is normalized here: arrays are reshaped into a list of
        # (min, max) pairs, empty/absent bounds become None.
        if ("Bounds" in self._parameters) \
                and isinstance(self._parameters["Bounds"], (list, tuple)):
            if (len(self._parameters["Bounds"]) > 0):
                logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        elif ("Bounds" in self._parameters) \
                and isinstance(self._parameters["Bounds"], (numpy.ndarray, numpy.matrix)):
            self._parameters["Bounds"] = numpy.ravel(self._parameters["Bounds"]).reshape((-1, 2)).tolist()
            if (len(self._parameters["Bounds"]) > 0):
                logging.debug("%s Bounds for states taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        self._parameters["Bounds"] = None
        if self._parameters["Bounds"] is None:
            logging.debug("%s There are no bounds for states to take into account"%(self._name,))

        # Same normalization for the quantile-specific state bounds, but with
        # no default to None (see the original French note kept below).
        if ("StateBoundsForQuantiles" in self._parameters) \
                and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
                and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
        elif ("StateBoundsForQuantiles" in self._parameters) \
                and isinstance(self._parameters["StateBoundsForQuantiles"], (numpy.ndarray, numpy.matrix)):
            self._parameters["StateBoundsForQuantiles"] = numpy.ravel(self._parameters["StateBoundsForQuantiles"]).reshape((-1, 2)).tolist()
            if (len(self._parameters["StateBoundsForQuantiles"]) > 0):
                logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Attention : contrairement à Bounds, il n'y a pas de défaut à None,
            # sinon on ne peut pas être sans bornes

        # Corrections et compléments de l'initialisation en X
        # The forced initial point must match the background size; it falls
        # back on the background Xb when not explicitly given.
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"], 'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    # (message of an elided "raise ValueError(" opening)
                    "Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        % (self._parameters["InitializationPoint"].size, numpy.ravel(Xb).size))
                # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
            self._parameters["InitializationPoint"] = numpy.ravel(Xb)
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")

        # Correction pour pallier a un bug de TNC sur le retour du Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables", True)

        # Verbosité et logging
        # Optimizer chattiness follows the global logging level.
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        self._parameters["optmessages"] = 0
    def _post_run(self, _oH=None, _oM=None):
        # Common post-processing after a run: derives the a posteriori
        # variances, standard deviations and correlations from each stored
        # covariance when requested, then logs the operator call counts and
        # the memory / CPU / elapsed-time usage.
        # NOTE(review): the opening "logging.debug(" of the multi-line calls
        # reporting operator call counts is elided from this view.
        if ("StoreSupplementaryCalculations" in self._parameters) and \
                "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by its standard deviations to
                    # obtain the correlation matrix
                    _EI = numpy.diag(1. / numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            # (arguments of elided logging.debug calls)
            "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(0), _oH["Tangent"].nbcalls(0), _oH["Adjoint"].nbcalls(0))
            "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(3), _oH["Tangent"].nbcalls(3), _oH["Adjoint"].nbcalls(3))
        if _oM is not None and "Direct" in _oM and "Tangent" in _oM and "Adjoint" in _oM:
            "%s Nombre d'évaluation(s) de l'opérateur d'évolution direct/tangent/adjoint.: %i/%i/%i",
            self._name, _oM["Direct"].nbcalls(0), _oM["Tangent"].nbcalls(0), _oM["Adjoint"].nbcalls(0))
            "%s Nombre d'appels au cache d'opérateur d'évolution direct/tangent/adjoint..: %i/%i/%i",
            self._name, _oM["Direct"].nbcalls(3), _oM["Tangent"].nbcalls(3), _oM["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
1023 def _toStore(self, key):
1024 "True if in StoreSupplementaryCalculations, else False"
1025 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        # Accessor to one stored variable (by case-insensitive canonical key)
        # or to the whole storage dictionary when no key is given.
        # NOTE(review): docstring delimiters and the guard selecting between
        # the two returns are elided from this view.
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Lookup is case-insensitive through the canonical-name mapping.
        if key is None or key.lower() not in self.__canonical_stored_name:
            # NOTE(review): the "return False" of this branch is elided here.
        return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): the "def keys(self):" opening of this method is
        # elided from this view; it exposes the stored-variable names.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Case-insensitive removal of a stored variable through the
        # canonical-name mapping.
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
        # NOTE(review): the else/try structure raising for a missing key is
        # partially elided from this view.
            raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
        # Abstract entry point: every concrete algorithm must override it.
        # (docstring delimiters elided in this view)
        Doit implémenter l'opération élémentaire de calcul algorithmique.
        raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
    def defineRequiredParameter(
        # Declare one algorithm parameter with its default value, optional
        # typecast, bounds (minval/maxval), allowed values (listval/listadv),
        # user message and deprecated former name (oldname).
        # NOTE(review): the keyword parameter list of this signature and the
        # "if name is None:" guard are elided from this view.
        Permet de définir dans l'algorithme des paramètres requis et leurs
        caractéristiques par défaut.
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "default" : default,       # noqa: E203
            "typecast" : typecast,     # noqa: E203
            "minval" : minval,         # noqa: E203
            "maxval" : maxval,         # noqa: E203
            "listval" : listval,       # noqa: E203
            "listadv" : listadv,       # noqa: E203
            "message" : message,       # noqa: E203
            "oldname" : oldname,       # noqa: E203
        self.__canonical_parameter_name[name.lower()] = name
        if oldname is not None:
            # Deprecated names are redirected to the new canonical name.
            self.__canonical_parameter_name[oldname.lower()] = name  # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        # With noDetails, only the sorted parameter names; otherwise the
        # full declaration dictionary.
        # NOTE(review): docstring delimiters and the if/else keywords of the
        # two returns are elided from this view.
        Renvoie la liste des noms de paramètres requis ou directement le
        dictionnaire des paramètres requis.
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        # Controlled evaluation of one declared parameter: resolves the
        # canonical name, applies the typecast, then validates against
        # minval/maxval and the allowed-value lists.
        # NOTE(review): docstring delimiters and several assignment/else/try
        # lines of this method are elided from this view.
        Renvoie la valeur d'un paramètre requis de manière contrôlée
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # Compute __val from value or default, through typecast when given.
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None:
                __val = typecast( default )
            if typecast is None:
                    __val = typecast( value )
                    raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Range checks apply element-wise on the casted value.
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            # Sequence parameters are checked item by item against the
            # public list (listval) and the advanced list (listadv).
            if typecast is list or typecast is tuple or isinstance(__val, list) or isinstance(__val, tuple):
                    if listval is not None and v in listval:
                    elif listadv is not None and v in listadv:
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(__val, __k, listval))
        # Special handling for the seed parameter.
        if __k in ["SetSeed",]:
    def requireInputArguments(self, mandatory=(), optional=()):
        # Registers which run-time inputs (Xb, Y, HO...) are mandatory or
        # optional for this algorithm. (docstring delimiters elided)
        Permet d'imposer des arguments de calcul requis en entrée.
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        # Returns the (mandatory, optional) tuples of required input names.
        # (docstring delimiters elided)
        Permet d'obtenir les listes des arguments de calcul requis en entrée.
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=(), features=()):
        # Appends classification tags/features and always returns the
        # current lists (so it doubles as a getter with no arguments).
        # (docstring delimiters elided)
        Permet d'adjoindre des attributs comme les tags de classification.
        Renvoie la liste actuelle dans tous les cas.
        self.__required_inputs["AttributesTags"].extend( tags )
        self.__required_inputs["AttributesFeatures"].extend( features )
        return (self.__required_inputs["AttributesTags"], self.__required_inputs["AttributesFeatures"])
    def __setParameters(self, fromDico={}, reset=False):
        # Stores the received parameters: maps user keys to canonical names,
        # warns about deprecated names, evaluates each declared parameter
        # through setParameterValue, and logs the result.
        # NOTE(review): docstring delimiters and a few else/blank lines are
        # elided from this view.
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        self._parameters.update( fromDico )
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            # Only keys known through the canonical-name mapping are kept
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        # __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()

        for k in __inverse_fromDico_keys.values():
            # Emit a FutureWarning for each deprecated parameter name used
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k, self._name, __newk)
                __msg += " Please update your code."
                warnings.warn(__msg, FutureWarning, stacklevel=50)

        for k in self.__required_parameters.keys():
            # User-provided value when given, declared default otherwise
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k, fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            # Large values are only summarized in the debug log.
            if hasattr(self._parameters[k], "size") and self._parameters[k].size > 100:
                logging.debug("%s %s d'une taille totale de %s", self._name, self.__required_parameters[k]["message"], self._parameters[k].size)
            elif hasattr(self._parameters[k], "__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
        # Stores named variables making up the internal state; a single
        # (key, value) pair and/or a whole dictionary can be given.
        # (docstring delimiters elided)
        Permet de stocker des variables nommées constituant l'état interne
        if reset:  # Vide le dictionnaire préalablement
            self.__internal_state = {}
        if key is not None and value is not None:
            self.__internal_state[key] = value
        self.__internal_state.update( dict(fromDico) )
    def _getInternalState(self, key=None):
        # One named internal variable when the key exists, otherwise the
        # whole internal-state dictionary. (docstring/else keywords elided)
        Restitue un état interne sous la forme d'un dictionnaire de variables nommées
        if key is not None and key in self.__internal_state:
            return self.__internal_state[key]
        return self.__internal_state
    def _getTimeState(self, reset=False):
        # With reset=True, records the reference CPU/elapsed instants;
        # otherwise returns the (cpu, elapsed) durations since the reset.
        # NOTE(review): docstring delimiters and the if/else keywords of the
        # two branches are elided from this view.
        Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        # Compares current CPU/elapsed durations to the MaximumCpuTime and
        # MaximumElapsedTime parameters when they are set.
        # NOTE(review): the final return of (__SC, __SR) or __SC is elided
        # from this view.
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
1269 # ==============================================================================
class PartialAlgorithm(object):
    # Lightweight stand-in for "Algorithm": same storage interface
    # (StoredVariables, _toStore, get) but none of the advanced checking.
    # NOTE(review): docstring and __slots__ delimiters, and the guard of
    # get(), are elided from this view.
    Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
    action avancée comme la vérification . Pour les méthodes reprises ici,
    le fonctionnement est identique à celles de la classe "Algorithm".
        "_name", "_parameters", "StoredVariables", "__canonical_stored_name",

    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations": []}
        # Minimal set of persisted variables mirrored from Algorithm
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        # Case-insensitive access map to the stored variables
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        # One stored variable by (case-insensitive) key, or the whole
        # dictionary when no key is given.
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        return self.StoredVariables
1313 # ==============================================================================
class AlgorithmAndParameters(object):
    # Interface object binding one algorithm choice with its parameters and
    # the calculation inputs (Xb, Y, U, HO, EM, CM, B, R, Q).
    # NOTE(review): docstring delimiters, the "__slots__ = (" opening and the
    # "def __init__(self," opening with most keyword parameters are elided
    # from this view.
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
        "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
        "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
        "__Q", "__variable_names_not_public",

        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # Parameters may come from a dictionary and/or a user script
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            # Record the algorithm choice and load the matching module
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm": self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep": False}  # Duplication dans Algorithm
    def updateParameters(self, asDict = None, asScript = None ):
        "Mise à jour des paramètres"
        # Parameters can be read from a user script when no dictionary is
        # given directly. (the else-branch assigning __Dict is elided here)
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        # Unpacks the calculation objects from the given dictionary (each one
        # may be wrapped with a getO() accessor) and runs the algorithm.
        # NOTE(review): the keyword arguments of the final self.__algorithm.run(
        # call are elided from this view.
        Operator.CM.clearCache()

        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        if hasattr(asDictAO["Background"], "getO"):        self.__Xb = asDictAO["Background"].getO()         # noqa: E241,E701
        elif hasattr(asDictAO["CheckingPoint"], "getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()      # noqa: E241,E701
        else:                                              self.__Xb = None                                  # noqa: E241,E701
        if hasattr(asDictAO["Observation"], "getO"):       self.__Y = asDictAO["Observation"].getO()         # noqa: E241,E701
        else:                                              self.__Y = asDictAO["Observation"]                # noqa: E241,E701
        if hasattr(asDictAO["ControlInput"], "getO"):      self.__U = asDictAO["ControlInput"].getO()        # noqa: E241,E701
        else:                                              self.__U = asDictAO["ControlInput"]               # noqa: E241,E701
        if hasattr(asDictAO["ObservationOperator"], "getO"): self.__HO = asDictAO["ObservationOperator"].getO()  # noqa: E241,E701
        else:                                              self.__HO = asDictAO["ObservationOperator"]       # noqa: E241,E701
        if hasattr(asDictAO["EvolutionModel"], "getO"):    self.__EM = asDictAO["EvolutionModel"].getO()     # noqa: E241,E701
        else:                                              self.__EM = asDictAO["EvolutionModel"]            # noqa: E241,E701
        if hasattr(asDictAO["ControlModel"], "getO"):      self.__CM = asDictAO["ControlModel"].getO()       # noqa: E241,E701
        else:                                              self.__CM = asDictAO["ControlModel"]              # noqa: E241,E701
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]

        # Consistency of the input sizes is checked before running
        self.__shape_validate()

        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        # Loads and executes a YACS XML scheme in a SALOME environment,
        # validating the schema (parser log, error report, link consistency)
        # before execution.
        # NOTE(review): several try/except openings, "import pilot"/"import
        # loader" lines and early returns are elided from this view.
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        # SALOME/YACS/ADAO environment is mandatory for this execution mode
        if not PlatformInfo.has_salome or \
                not PlatformInfo.has_yacs or \
                not PlatformInfo.has_adao:
                "Unable to get SALOME, YACS or ADAO environnement variables.\n" + \
                "Please load the right environnement before trying to use it.\n" )

        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()

        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # The scheme file itself is also loaded as a catalog
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)

            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))

        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            # Validity and link-consistency diagnostics before execution
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())

        info = pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())

        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        # Looks up first in the algorithm storage, then in the parameters;
        # with no key, returns all public parameters.
        # NOTE(review): the else keyword and the final "return allvariables"
        # are elided from this view.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            allvariables = self.__P
            # Internal (non public) variables are removed from the view
            for k in self.__variable_names_not_public:
                allvariables.pop(k, None)
1471 def pop(self, k, d):
1472 "Necessaire pour le pickling"
1473 return self.__algorithm.pop(k, d)
1475 def getAlgorithmRequiredParameters(self, noDetails=True):
1476 "Renvoie la liste des paramètres requis selon l'algorithme"
1477 return self.__algorithm.getRequiredParameters(noDetails)
1479 def getAlgorithmInputArguments(self):
1480 "Renvoie la liste des entrées requises selon l'algorithme"
1481 return self.__algorithm.getInputArguments()
1483 def getAlgorithmAttributes(self):
1484 "Renvoie la liste des attributs selon l'algorithme"
1485 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        # Attaches a data observer (hook __O with parameters __I, scheduled
        # by __S) on the stored variable named __V; only valid once an
        # algorithm with StoredVariables exists.
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm, "StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            self.__algorithm.StoredVariables[ __V ].setDataObserver( Scheduler = __S, HookFunction = __O, HookParameters = __I )
    def removeObserver(self, __V, __O, __A = False):
        # Removes the observer hook __O from the stored variable __V; with
        # __A True, removes all observers of that variable.
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm, "StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver( HookFunction = __O, AllObservers = __A )
    def hasObserver(self, __V):
        # Whether the stored variable named __V currently has an observer.
        # NOTE(review): the "return False" of the two negative branches is
        # elided from this view.
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm, "StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the "def keys(self):" opening of this method is
        # elided from this view; it merges algorithm and parameter keys,
        # filtering out the internal (non public) variable names.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables:
                __allvariables.remove(k)
        return __allvariables
1523 def __contains__(self, key=None):
1524 "D.__contains__(k) -> True if D has a key k, else False"
1525 return key in self.__algorithm or key in self.__P
        # NOTE(review): the "def __repr__(self):" and "def __str__(self):"
        # openings of these two dunder methods are elided from this view;
        # both render the algorithm name followed by the parameters.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A) + ", " + repr(self.__P)

        "x.__str__() <==> str(x)"
        return str(self.__A) + ", " + str(self.__P)
    def __setAlgorithm(self, choice = None ):
        # Locates the module "<choice>.py" under a "daAlgorithms" directory
        # on sys.path, imports it, checks it defines ElementaryAlgorithm,
        # and instantiates it. The choice can only be made once.
        # NOTE(review): docstring delimiters, the "if choice is None:" guard,
        # and several try/raise openings are elided from this view.
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"

        # Recherche explicitement le fichier complet
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice) + '.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            # (message of an elided "raise ImportError(" opening)
            "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))

        # Importe le fichier complet comme un module
        # ------------------------------------------
        # sys.path is temporarily extended, then restored, around the import
        sys_path_tmp = sys.path
        sys.path.insert(0, module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp

        except ImportError as e:
            # (message of an elided "raise ValueError(" opening)
            "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice, e))

        # Instancie un objet du type élémentaire du fichier
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
def __shape_validate(self):
    """
    Validate the mutual consistency of the sizes of the variables and of
    the matrices involved in the study (Xb, Y, U, B, R, Q, HO, EM, CM and
    the bound parameters).

    Raises TypeError when an object exposes no usable shape information,
    and ValueError when two shapes are incompatible. For the special
    "EnsembleBlue" algorithm, a background whose size is a multiple of B's
    order is reshaped into a Persistence series of ensemble members.
    """
    # Shapes are collected tolerating plain scalars/vectors (size), numpy
    # objects (tuple-valued shape attribute) and objects whose shape is a
    # callable method.
    if self.__Xb is None: __Xb_shape = (0,)  # noqa: E241,E701
    elif hasattr(self.__Xb, "size"): __Xb_shape = (self.__Xb.size,)  # noqa: E241,E701
    elif hasattr(self.__Xb, "shape"):
        if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape  # noqa: E241,E701
        else: __Xb_shape = self.__Xb.shape()  # noqa: E241,E701
    else: raise TypeError("The background (Xb) has no attribute of shape: problem !")  # noqa: E701
    #
    if self.__Y is None: __Y_shape = (0,)  # noqa: E241,E701
    elif hasattr(self.__Y, "size"): __Y_shape = (self.__Y.size,)  # noqa: E241,E701
    elif hasattr(self.__Y, "shape"):
        if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape  # noqa: E241,E701
        else: __Y_shape = self.__Y.shape()  # noqa: E241,E701
    else: raise TypeError("The observation (Y) has no attribute of shape: problem !")  # noqa: E701
    #
    if self.__U is None: __U_shape = (0,)  # noqa: E241,E701
    elif hasattr(self.__U, "size"): __U_shape = (self.__U.size,)  # noqa: E241,E701
    elif hasattr(self.__U, "shape"):
        if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape  # noqa: E241,E701
        else: __U_shape = self.__U.shape()  # noqa: E241,E701
    else: raise TypeError("The control (U) has no attribute of shape: problem !")  # noqa: E701
    #
    if self.__B is None: __B_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__B, "shape"):
        if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape  # noqa: E241,E701
        else: __B_shape = self.__B.shape()  # noqa: E241,E701
    else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")  # noqa: E701
    #
    if self.__R is None: __R_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__R, "shape"):
        if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape  # noqa: E241,E701
        else: __R_shape = self.__R.shape()  # noqa: E241,E701
    else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")  # noqa: E701
    #
    if self.__Q is None: __Q_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__Q, "shape"):
        if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape  # noqa: E241,E701
        else: __Q_shape = self.__Q.shape()  # noqa: E241,E701
    else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")  # noqa: E701
    #
    # Operators are dictionaries of operator forms; an empty or plain dict
    # means "not yet defined" and gets a neutral (0, 0) shape.
    if len(self.__HO) == 0: __HO_shape = (0, 0)  # noqa: E241,E701
    elif isinstance(self.__HO, dict): __HO_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__HO["Direct"], "shape"):
        if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape  # noqa: E241,E701
        else: __HO_shape = self.__HO["Direct"].shape()  # noqa: E241,E701
    else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")  # noqa: E701
    #
    if len(self.__EM) == 0: __EM_shape = (0, 0)  # noqa: E241,E701
    elif isinstance(self.__EM, dict): __EM_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__EM["Direct"], "shape"):
        if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape  # noqa: E241,E701
        else: __EM_shape = self.__EM["Direct"].shape()  # noqa: E241,E701
    else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")  # noqa: E701
    #
    if len(self.__CM) == 0: __CM_shape = (0, 0)  # noqa: E241,E701
    elif isinstance(self.__CM, dict): __CM_shape = (0, 0)  # noqa: E241,E701
    elif hasattr(self.__CM["Direct"], "shape"):
        if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape  # noqa: E241,E701
        else: __CM_shape = self.__CM["Direct"].shape()  # noqa: E241,E701
    else: raise TypeError("The control model (CM) has no attribute of shape: problem !")  # noqa: E701
    #
    # Conditions check
    # ----------------
    if not ( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
        raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
    if not ( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
        raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
    #
    if not ( min(__B_shape) == max(__B_shape) ):
        raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
    if not ( min(__R_shape) == max(__R_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
    if not ( min(__Q_shape) == max(__Q_shape) ):
        raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
    if not ( min(__EM_shape) == max(__EM_shape) ):
        raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
    #
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[1] == max(__Xb_shape) ):
        raise ValueError(
            "Shape characteristic of observation operator (H)" + \
            " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape, __Xb_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[0] == max(__Y_shape) ):
        raise ValueError(
            "Shape characteristic of observation operator (H)" + \
            " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape, __Y_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not ( __HO_shape[1] == __B_shape[0] ):
        raise ValueError(
            "Shape characteristic of observation operator (H)" + \
            " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape, __B_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not ( __HO_shape[0] == __R_shape[1] ):
        raise ValueError(
            "Shape characteristic of observation operator (H)" + \
            " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape, __R_shape))
    #
    if self.__B is not None and len(self.__B) > 0 and not ( __B_shape[1] == max(__Xb_shape) ):
        if self.__algorithmName in ["EnsembleBlue",]:
            # EnsembleBlue accepts a background made of stacked members:
            # split Xb into members of size min(B_shape).
            asPersistentVector = self.__Xb.reshape((-1, min(__B_shape)))
            self.__Xb = Persistence.OneVector("Background")
            for member in asPersistentVector:
                self.__Xb.store( numpy.asarray(member, dtype=float) )
            __Xb_shape = min(__B_shape)
        else:
            raise ValueError(
                "Shape characteristic of a priori errors covariance matrix (B)" + \
                " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape, __Xb_shape))
    #
    if self.__R is not None and len(self.__R) > 0 and not ( __R_shape[1] == max(__Y_shape) ):
        raise ValueError(
            "Shape characteristic of observation errors covariance matrix (R)" + \
            " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape, __Y_shape))
    #
    if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not ( __EM_shape[1] == max(__Xb_shape) ):
        raise ValueError(
            "Shape characteristic of evolution model (EM)" + \
            " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape, __Xb_shape))
    #
    if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not ( __CM_shape[1] == max(__U_shape) ):
        raise ValueError(
            "Shape characteristic of control model (CM)" + \
            " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape, __U_shape))
    #
    # Bound pair counts must match the state size; an empty specification
    # is silently normalised to None.
    if ("Bounds" in self.__P) \
            and isinstance(self.__P["Bounds"], (list, tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
        if len(self.__P["Bounds"]) > 0:
            raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
                % (len(self.__P["Bounds"]), max(__Xb_shape)))
        else:
            self.__P["Bounds"] = None
    if ("Bounds" in self.__P) \
            and isinstance(self.__P["Bounds"], (numpy.ndarray, numpy.matrix)) \
            and (self.__P["Bounds"].shape[0] != max(__Xb_shape)):
        if self.__P["Bounds"].size > 0:
            raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
                % (self.__P["Bounds"].shape[0], max(__Xb_shape)))
        else:
            self.__P["Bounds"] = None
    #
    if ("BoxBounds" in self.__P) \
            and isinstance(self.__P["BoxBounds"], (list, tuple)) \
            and (len(self.__P["BoxBounds"]) != max(__Xb_shape)):
        raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
            % (len(self.__P["BoxBounds"]), max(__Xb_shape)))
    if ("BoxBounds" in self.__P) \
            and isinstance(self.__P["BoxBounds"], (numpy.ndarray, numpy.matrix)) \
            and (self.__P["BoxBounds"].shape[0] != max(__Xb_shape)):
        raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
            % (self.__P["BoxBounds"].shape[0], max(__Xb_shape)))
    #
    if ("StateBoundsForQuantiles" in self.__P) \
            and isinstance(self.__P["StateBoundsForQuantiles"], (list, tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
        raise ValueError("The number '%s' of bound pairs for the quantile state components is different from the size '%s' of the state (X) itself." \
            % (len(self.__P["StateBoundsForQuantiles"]), max(__Xb_shape)))
1738 # ==============================================================================
class RegulationAndParameters(object):
    """
    Generic interface class holding a regulation algorithm name and its
    parameters, either given directly or loaded from a script.
    """
    __slots__ = ("__name", "__P")

    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None ):
        """
        Store the algorithm choice and the parameter dictionary; each one
        may alternatively be read from the script named by "asScript".
        """
        self.__name = str(name)
        self.__P    = {}
        #
        # The explicit arguments win; the script is only consulted when the
        # corresponding direct argument is absent.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm": str(__Algo)} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1778 # ==============================================================================
class DataObserver(object):
    """
    Generic observer interface class: binds an observer function to one or
    several algorithm variables of a given algorithm object.
    """
    __slots__ = ("__name", "__V", "__O", "__I")

    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 withAlgo    = None,
                 scheduledBy = None ):
        """
        Register the observer "asObsObject" (or one built from a template,
        a string or a script) on the variable name(s) "onVariable" of the
        algorithm object "withAlgo".
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        # Normalise the variable name(s) and the attached info string(s)
        # into parallel tuples.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),) * len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            # Build a callable observer from the user-provided text.
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V) + "\n" + repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V) + "\n" + str(self.__O)
1842 # ==============================================================================
class UserScript(object):
    """
    Generic interface class wrapping a user script text, built from a raw
    string, a named template, or an external script file.
    """
    __slots__ = ("__name", "__F")

    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None ):
        """
        Resolve the script text by priority: explicit string first, then a
        template matching the script's role name, then an external script.
        """
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1877 # ==============================================================================
class ExternalParameters(object):
    """
    Generic interface class for storing external parameters, fed from a
    dictionary or from a script variable named "ExternalParameters".
    """
    __slots__ = ("__name", "__P")

    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None ):
        "Initialise the store and load the initial parameter values"
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self, asDict = None, asScript = None ):
        "Update the stored parameters (direct dict wins over the script)"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value for an existing key, or the list of keys"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "Return the list of stored parameter names"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1924 # ==============================================================================
class State(object):
    """
    Generic state-vector interface class: wraps either a single vector or a
    Persistence series of vectors, coming from direct values, a script or
    a data file.
    """
    __slots__ = (
        "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
        "shape", "size",
    )

    def __init__(self,
                 name               = "GenericVector",
                 asVector           = None,
                 asPersistentVector = None,
                 asScript           = None,
                 asDataFile         = None,
                 colNames           = None,
                 colMajor           = False,
                 scheduledBy        = None,
                 toBeChecked        = False ):
        """
        Define a vector:
        - asVector: the data as anything accepted by the numpy constructors,
          a single vector.
        - asPersistentVector: the data as a series of vectors, or as a
          Persistence object.
        - asScript: name of a valid script containing a variable named
          "name"; interpreted as "asVector" (default) or as
          "asPersistentVector" depending on which flag is set.
        - asDataFile: one or more files with named columns; columns are
          read row after row (colMajor=False, default) or column after
          column (colMajor=True), producing an "asVector" or an
          "asPersistentVector" value.
        - scheduledBy: optional scheduler attached to the state.
        - toBeChecked: request additional checking of the input.
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__V = None
        self.__T = None
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz archives are already stored column-major, hence the
                # double transposition rule.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            else:
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                if bool(colMajor):
                    __Vector = numpy.ravel(__Vector, order = "F")
                else:
                    __Vector = numpy.ravel(__Vector, order = "C")
        else:
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            # Stored internally as a float column vector.
            self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1, 1))
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name)
                if isinstance(__Series, str):
                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                for member in __Series:
                    if isinstance(member, str):
                        member = PlatformInfo.strvect2liststr( member )
                    self.__V.store(numpy.asarray( member, dtype=float ))
            else:
                self.__V = __Series
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            else:
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0], 1)
            self.size = self.shape[0] * self.shape[1]
        else:
            raise ValueError(
                "The %s object is improperly defined or undefined,"%self.__name + \
                " it requires at minima either a vector, a list/tuple of" + \
                " vectors or a persistent object. Please check your vector input.")
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        "Return the stored object, possibly with its scheduler"
        if withScheduler:
            return self.__V, self.__T
        elif self.__T is None:
            return self.__V
        else:
            return self.__V

    def isvector(self):
        "Internal type check: single vector"
        return self.__is_vector

    def isseries(self):
        "Internal type check: series of vectors"
        return self.__is_series

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)
2060 # ==============================================================================
2061 class Covariance(object):
2063 Classe générale d'interface de type covariance
2066 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
2067 "__is_object", "shape", "size",
def __init__(self,
             name          = "GenericCovariance",
             asCovariance  = None,
             asEyeByScalar = None,
             asEyeByVector = None,
             asCovObject   = None,
             asScript      = None,
             toBeChecked   = False ):
    """
    Define a covariance:
    - asCovariance: the data as a full matrix compatible with the numpy
      matrix constructor.
    - asEyeByScalar: a single variance scalar multiplying an implicit
      identity correlation matrix (no matrix is ever stored).
    - asEyeByVector: a single variance vector placed on the diagonal of an
      implicit correlation matrix (no matrix is ever stored).
    - asCovObject: a python object providing the mandatory methods "getT",
      "getI", "diag", "trace", "__add__", "__sub__", "__neg__", "__mul__",
      "__rmul__" and optionally "shape", "size", "cholesky", "choleskyI",
      "asfullmatrix", "__repr__", "__str__".
    - toBeChecked: boolean asking for an SPD check of the full matrix.
    """
    self.__name = str(name)
    self.__check = bool(toBeChecked)
    #
    self.__C = None
    self.__is_scalar = False
    self.__is_vector = False
    self.__is_matrix = False
    self.__is_object = False
    #
    if asScript is not None:
        __Matrix, __Scalar, __Vector, __Object = None, None, None, None
        if asEyeByScalar:
            __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asEyeByVector:
            __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asCovObject:
            __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        else:
            __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
    else:
        __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
    #
    if __Scalar is not None:
        if isinstance(__Scalar, str):
            __Scalar = PlatformInfo.strvect2liststr( __Scalar )
            if len(__Scalar) > 0:
                __Scalar = __Scalar[0]
        if numpy.array(__Scalar).size != 1:
            raise ValueError(
                " The diagonal multiplier given to define a sparse matrix is" + \
                " not a unique scalar value.\n Its actual measured size is" + \
                " %i. Please check your scalar input."%numpy.array(__Scalar).size)
        self.__is_scalar = True
        self.__C = numpy.abs( float(__Scalar) )
        self.shape = (0, 0)
        self.size = 0
    elif __Vector is not None:
        if isinstance(__Vector, str):
            __Vector = PlatformInfo.strvect2liststr( __Vector )
        self.__is_vector = True
        self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
        self.shape = (self.__C.size, self.__C.size)
        self.size = self.__C.size**2
    elif __Matrix is not None:
        self.__is_matrix = True
        self.__C = numpy.matrix( __Matrix, float )
        self.shape = self.__C.shape
        self.size = self.__C.size
    elif __Object is not None:
        self.__is_object = True
        self.__C = __Object
        for at in ("getT", "getI", "diag", "trace", "__add__", "__sub__", "__neg__", "__matmul__", "__mul__", "__rmatmul__", "__rmul__"):
            if not hasattr(self.__C, at):
                raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name, at))
        if hasattr(self.__C, "shape"):
            self.shape = self.__C.shape
        else:
            self.shape = (0, 0)
        if hasattr(self.__C, "size"):
            self.size = self.__C.size
        else:
            self.size = 0
    else:
        pass
    #
    self.__validate()
def __validate(self):
    """
    Validate the stored covariance: presence, squareness, and positivity.
    The (possibly expensive) Cholesky SPD check is only run when requested
    via toBeChecked or when the logging level is more verbose than WARNING.
    """
    if self.__C is None:
        raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
    if self.ismatrix() and min(self.shape) != max(self.shape):
        raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name, self.shape))
    if self.isobject() and min(self.shape) != max(self.shape):
        raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name, self.shape))
    if self.isscalar() and self.__C <= 0:
        raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name, self.__C))
    if self.isvector() and (self.__C <= 0).any():
        raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
    if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
        try:
            numpy.linalg.cholesky( self.__C )
        except Exception:
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
    if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
        try:
            self.__C.cholesky()
        except Exception:
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
def isscalar(self):
    "Internal type check: True when the covariance is held as a scalar"
    return self.__is_scalar
def isvector(self):
    "Internal type check: True when the covariance is held as a diagonal vector"
    return self.__is_vector
def ismatrix(self):
    "Internal type check: True when the covariance is held as a full matrix"
    return self.__is_matrix
def isobject(self):
    "Internal type check: True when the covariance is held as a user object"
    return self.__is_object
def getI(self):
    "Inverse of the covariance, preserving the storage kind"
    if self.ismatrix():
        return Covariance(self.__name + "I", asCovariance = numpy.linalg.inv(self.__C) )
    elif self.isvector():
        return Covariance(self.__name + "I", asEyeByVector = 1. / self.__C )
    elif self.isscalar():
        return Covariance(self.__name + "I", asEyeByScalar = 1. / self.__C )
    elif self.isobject() and hasattr(self.__C, "getI"):
        return Covariance(self.__name + "I", asCovObject = self.__C.getI() )
    else:
        return None  # Indispensable
def getT(self):
    "Transpose of the covariance (symmetric: diagonal forms are unchanged)"
    if self.ismatrix():
        return Covariance(self.__name + "T", asCovariance = self.__C.T )
    elif self.isvector():
        return Covariance(self.__name + "T", asEyeByVector = self.__C )
    elif self.isscalar():
        return Covariance(self.__name + "T", asEyeByScalar = self.__C )
    elif self.isobject() and hasattr(self.__C, "getT"):
        return Covariance(self.__name + "T", asCovObject = self.__C.getT() )
    else:
        raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
def cholesky(self):
    "Cholesky factor of the covariance (square root of diagonal forms)"
    if self.ismatrix():
        return Covariance(self.__name + "C", asCovariance = numpy.linalg.cholesky(self.__C) )
    elif self.isvector():
        return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
    elif self.isscalar():
        return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
    elif self.isobject() and hasattr(self.__C, "cholesky"):
        return Covariance(self.__name + "C", asCovObject = self.__C.cholesky() )
    else:
        raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
def choleskyI(self):
    "Inverse of the Cholesky factor of the covariance"
    if self.ismatrix():
        return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
    elif self.isvector():
        return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
    elif self.isscalar():
        return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
    elif self.isobject() and hasattr(self.__C, "choleskyI"):
        return Covariance(self.__name + "H", asCovObject = self.__C.choleskyI() )
    else:
        raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
def sqrtm(self):
    "Matrix square root of the covariance (real part for the full form)"
    if self.ismatrix():
        return Covariance(self.__name + "C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
    elif self.isvector():
        return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
    elif self.isscalar():
        return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
    elif self.isobject() and hasattr(self.__C, "sqrtm"):
        return Covariance(self.__name + "C", asCovObject = self.__C.sqrtm() )
    else:
        raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
def sqrtmI(self):
    "Inverse of the matrix square root of the covariance"
    if self.ismatrix():
        return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
    elif self.isvector():
        return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
    elif self.isscalar():
        return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
    elif self.isobject() and hasattr(self.__C, "sqrtmI"):
        return Covariance(self.__name + "H", asCovObject = self.__C.sqrtmI() )
    else:
        raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
def diag(self, msize=None):
    "Diagonal of the covariance; a scalar form needs the target size msize"
    if self.ismatrix():
        return numpy.diag(self.__C)
    elif self.isvector():
        return self.__C
    elif self.isscalar():
        if msize is None:
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
        else:
            return self.__C * numpy.ones(int(msize))
    elif self.isobject() and hasattr(self.__C, "diag"):
        return self.__C.diag()
    else:
        raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
def trace(self, msize=None):
    "Trace of the covariance; a scalar form needs the target size msize"
    if self.ismatrix():
        return numpy.trace(self.__C)
    elif self.isvector():
        return float(numpy.sum(self.__C))
    elif self.isscalar():
        if msize is None:
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
        else:
            return self.__C * int(msize)
    elif self.isobject():
        return self.__C.trace()
    else:
        raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
def asfullmatrix(self, msize=None):
    "Full dense matrix form of the covariance (float dtype)"
    if self.ismatrix():
        return numpy.asarray(self.__C, dtype=float)
    elif self.isvector():
        return numpy.asarray( numpy.diag(self.__C), dtype=float )
    elif self.isscalar():
        if msize is None:
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
        else:
            return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
    elif self.isobject() and hasattr(self.__C, "asfullmatrix"):
        return self.__C.asfullmatrix()
    else:
        raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2329 def assparsematrix(self):
2337 "x.__repr__() <==> repr(x)"
2338 return repr(self.__C)
2341 "x.__str__() <==> str(x)"
2342 return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Diagonal forms: add the stored variance(s) to the diagonal of
        # `other` in place, then return the matrix view.
        _A = numpy.asarray(other)
        if len(_A.shape) == 1:
            _A.reshape((-1, 1))[::2] += self.__C
        else:
            # Flat-view slice [::ncols+1] addresses the diagonal entries.
            _A.reshape(_A.size)[::_A.shape[1] + 1] += self.__C
        return numpy.asmatrix(_A)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Right-hand addition is deliberately unsupported for this covariance
    # representation: callers must use Covariance + other instead.
    raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name, type(other)))
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Diagonal forms: replace the diagonal of `other` with
        # (variance - diagonal), using the flat-view slice [::ncols+1]
        # to address the diagonal entries in place.
        # NOTE(review): assumes `other` is 2-D — a 1-D input raises
        # IndexError on _A.shape[1], whereas __add__ handles the 1-D
        # case; confirm whether that asymmetry is intended.
        _A = numpy.asarray(other)
        _A.reshape(_A.size)[::_A.shape[1] + 1] = self.__C - _A.reshape(_A.size)[::_A.shape[1] + 1]
        return numpy.asmatrix(_A)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Right-hand subtraction is deliberately unsupported for this
    # covariance representation: callers must use Covariance - other.
    raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name, type(other)))
2374 "x.__neg__() <==> -x"
def __matmul__(self, other):
    "x.__mul__(y) <==> x@y"
    # Dispatch on the storage kind of the covariance, then on whether
    # `other` is a vector or a matrix; results are plain numpy arrays.
    if self.ismatrix() and isinstance(other, (int, float)):
        return numpy.asarray(self.__C) * other
    elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]:  # Vector
            return numpy.ravel(self.__C @ numpy.ravel(other))
        elif numpy.asarray(other).shape[0] == self.shape[1]:  # Matrix
            return numpy.asarray(self.__C) @ numpy.asarray(other)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asarray(other).shape, self.__name))
    elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]:  # Vector
            return numpy.ravel(self.__C) * numpy.ravel(other)
        elif numpy.asarray(other).shape[0] == self.shape[1]:  # Matrix
            return numpy.ravel(self.__C).reshape((-1, 1)) * numpy.asarray(other)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.ravel(other).shape, self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return numpy.asarray(self.__C * other)
    elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
            return self.__C * numpy.ravel(other)
        else:
            return self.__C * numpy.asarray(other)
    elif self.isobject():
        return self.__C.__matmul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name, type(other)))
def __mul__(self, other):
    "x.__mul__(y) <==> x*y"
    # Matrix-style product; same dispatch as __matmul__ but the results
    # are numpy.matrix objects (historical interface).
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return self.__C * other
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]:  # Vector
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # Matrix
            return self.__C * numpy.asmatrix(other)
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asmatrix(other).shape, self.__name))
    elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        if numpy.ravel(other).size == self.shape[1]:  # Vector
            return numpy.asmatrix(self.__C * numpy.ravel(other)).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # Matrix
            return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.ravel(other).shape, self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return self.__C * other
    elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        else:
            return self.__C * numpy.asmatrix(other)
    elif self.isobject():
        return self.__C.__mul__(other)
    else:
        raise NotImplementedError(
            "%s covariance matrix __mul__ method not available for %s type!"%(self.__name, type(other)))
2440 def __rmatmul__(self, other):
2441 "x.__rmul__(y) <==> y@x"
2442 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2443 return other * self.__C
2444 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2445 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2446 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2447 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2448 return numpy.asmatrix(other) * self.__C
2451 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
2452 elif self.isvector() and isinstance(other, numpy.matrix):
2453 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2454 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2455 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2456 return numpy.asmatrix(numpy.array(other) * self.__C)
2459 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape, self.shape, self.__name))
2460 elif self.isscalar() and isinstance(other, numpy.matrix):
2461 return other * self.__C
2462 elif self.isobject():
2463 return self.__C.__rmatmul__(other)
2465 raise NotImplementedError(
2466 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name, type(other)))
2468 def __rmul__(self, other):
2469 "x.__rmul__(y) <==> y*x"
2470 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2471 return other * self.__C
2472 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2473 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2474 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2475 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2476 return numpy.asmatrix(other) * self.__C
2479 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
2480 elif self.isvector() and isinstance(other, numpy.matrix):
2481 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2482 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2483 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2484 return numpy.asmatrix(numpy.array(other) * self.__C)
2487 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape, self.shape, self.__name))
2488 elif self.isscalar() and isinstance(other, numpy.matrix):
2489 return other * self.__C
2490 elif self.isscalar() and isinstance(other, float):
2491 return other * self.__C
2492 elif self.isobject():
2493 return self.__C.__rmul__(other)
2495 raise NotImplementedError(
2496 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name, type(other)))
2499 "x.__len__() <==> len(x)"
2500 return self.shape[0]
2502 # ==============================================================================
class Observer2Func(object):
    """
    Creation of an observer function from its source text.

    The text given as "corps" is executed each time func() is called, with
    the observed variable available as "var" and extra data as "info".
    """
    __slots__ = ("__corps",)  # explicit one-element tuple (was a bare string)

    def __init__(self, corps=""):
        # corps: Python source text to be executed at observation time
        self.__corps = corps

    def func(self, var, info):
        "Observation function: execute the stored source text"
        # SECURITY NOTE: exec() of user-supplied text — only ever feed this
        # class with trusted observer definitions.
        exec(self.__corps)

    def getfunc(self):
        "Return the function pointer stored in the object"
        return self.func
2520 # ==============================================================================
class CaseLogger(object):
    """
    Recording of the commands used to create a case, with normalized
    dump (output) and load (input) through pluggable viewers/loaders.
    """
    __slots__ = (
        "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
        "__loaders",
    )

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        """
        __name/__objname: identifiers used by the formatters.
        __addViewers/__addLoaders: optional dicts of extra format handlers.
        """
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []      # ordered list of (command, keys, local, pre, switchoff)
        self.__switchoff = False  # when True, register() ignores new commands
        # Output formats (format name -> viewer class)
        self.__viewers = {
            "TUI": Interfaces._TUIViewer,
            "SCD": Interfaces._SCDViewer,
            "YACS": Interfaces._YACSViewer,
            "SimpleReportInRst": Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml": Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt": Interfaces._SimpleReportInPlainTxtViewer,
        }
        # Input formats (format name -> loader class)
        self.__loaders = {
            "TUI": Interfaces._TUIViewer,
            "COM": Interfaces._COMViewer,
        }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys:
                __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # A switching-off command suspends recording of subsequent commands
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands from a file, a text or an object"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2579 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction = lambda x: x,
        _mpEnabled = False,
        _mpWorkers = None ):
    """
    For an ordered series of input vectors, return the corresponding series
    of values of the given function.

    __xserie: iterable of inputs passed one by one to _sFunction.
    _extraArguments: optional extra positional (list/tuple/map) or keyword
        (dict) arguments forwarded to every call of _sFunction.
    _mpEnabled/_mpWorkers: enable multiprocessing evaluation with the given
        number of workers (None or <1 lets the pool choose).

    Raises TypeError when __xserie is not iterable or _extraArguments has an
    unsupported type. ("unknown" typos in the messages fixed here.)
    """
    # Initial checks and definitions
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            __mpWorkers = None  # let multiprocessing pick the worker count
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = __xserie
        # Parallel evaluation of all the points with a process pool
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        # Sequential evaluation, forwarding extra arguments when provided
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unknown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
2636 # ==============================================================================
# Self-test entry point: running the module directly only prints a banner.
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC\n")