1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2024 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence
38 from daCore import PlatformInfo
39 from daCore import Interfaces
40 from daCore import Templates
42 # ==============================================================================
43 class CacheManager(object):
45 Classe générale de gestion d'un cache de calculs
48 "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
53 toleranceInRedundancy = 1.e-18,
54 lengthOfRedundancy = -1 ):
56 Les caractéristiques de tolérance peuvent être modifiées à la création.
58 self.__tolerBP = float(toleranceInRedundancy)
59 self.__lengthOR = int(lengthOfRedundancy)
60 self.__initlnOR = self.__lengthOR
70 def wasCalculatedIn(self, xValue, oName="" ):
71 "Vérifie l'existence d'un calcul correspondant à la valeur"
75 for i in range(min(len(self.__listOPCV), self.__lengthOR) - 1, -1, -1):
76 if not hasattr(xValue, 'size'):
78 elif (str(oName) != self.__listOPCV[i][3]):
80 elif (xValue.size != self.__listOPCV[i][0].size):
82 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
84 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
86 __HxV = self.__listOPCV[i][1]
90 def storeValueInX(self, xValue, HxValue, oName="" ):
91 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
92 if self.__lengthOR < 0:
93 self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
94 self.__initlnOR = self.__lengthOR
95 self.__seenNames.append(str(oName))
96 if str(oName) not in self.__seenNames: # Étend la liste si nouveau
97 self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
98 self.__initlnOR += self.__lengthOR
99 self.__seenNames.append(str(oName))
100 while len(self.__listOPCV) > self.__lengthOR:
101 self.__listOPCV.pop(0)
102 self.__listOPCV.append((
103 copy.copy(numpy.ravel(xValue)), # 0 Previous point
104 copy.copy(HxValue), # 1 Previous value
105 numpy.linalg.norm(xValue), # 2 Norm
106 str(oName), # 3 Operator name
111 self.__initlnOR = self.__lengthOR
113 self.__enabled = False
117 self.__lengthOR = self.__initlnOR
118 self.__enabled = True
120 # ==============================================================================
121 class Operator(object):
123 Classe générale d'interface de type opérateur simple
126 "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
127 "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
128 "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
137 name = "GenericOperator",
140 avoidingRedundancy = True,
141 reducingMemoryUse = False,
142 inputAsMultiFunction = False,
143 enableMultiProcess = False,
144 extraArguments = None ):
146 On construit un objet de ce type en fournissant, à l'aide de l'un des
147 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
150 - name : nom d'opérateur
151 - fromMethod : argument de type fonction Python
152 - fromMatrix : argument adapté au constructeur numpy.array/matrix
153 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
154 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
156 - inputAsMultiFunction : booléen indiquant une fonction explicitement
157 définie (ou pas) en multi-fonction
158 - extraArguments : arguments supplémentaires passés à la fonction de
159 base et ses dérivées (tuple ou dictionnaire)
161 self.__name = str(name)
162 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
163 self.__reduceM = bool( reducingMemoryUse )
164 self.__avoidRC = bool( avoidingRedundancy )
165 self.__inputAsMF = bool( inputAsMultiFunction )
166 self.__mpEnabled = bool( enableMultiProcess )
167 self.__extraArgs = extraArguments
168 if fromMethod is not None and self.__inputAsMF:
169 self.__Method = fromMethod # logtimer(fromMethod)
171 self.__Type = "Method"
172 elif fromMethod is not None and not self.__inputAsMF:
173 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
175 self.__Type = "Method"
176 elif fromMatrix is not None:
178 if isinstance(fromMatrix, str):
179 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
180 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
181 self.__Type = "Matrix"
187 def disableAvoidingRedundancy(self):
189 Operator.CM.disable()
191 def enableAvoidingRedundancy(self):
196 Operator.CM.disable()
202 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
204 Permet de restituer le résultat de l'application de l'opérateur à une
205 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
206 argument devant a priori être du bon type.
208 - les arguments par série sont :
209 - xValue : argument adapté pour appliquer l'opérateur
210 - HValue : valeur précalculée de l'opérateur en ce point
211 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
218 if HValue is not None:
222 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
224 if _HValue is not None:
225 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
227 for i in range(len(_HValue)):
228 _HxValue.append( _HValue[i] )
230 Operator.CM.storeValueInX(_xValue[i], _HxValue[-1], self.__name)
235 for i, xv in enumerate(_xValue):
237 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv, self.__name)
239 __alreadyCalculated = False
241 if __alreadyCalculated:
242 self.__addOneCacheCall()
245 if self.__Matrix is not None:
246 self.__addOneMatrixCall()
247 _hv = self.__Matrix @ numpy.ravel(xv)
249 self.__addOneMethodCall()
253 _HxValue.append( _hv )
255 if len(_xserie) > 0 and self.__Matrix is None:
256 if self.__extraArgs is None:
257 _hserie = self.__Method( _xserie ) # Calcul MF
259 _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
260 if not hasattr(_hserie, "pop"):
262 "The user input multi-function doesn't seem to return a" + \
263 " result sequence, behaving like a mono-function. It has" + \
270 Operator.CM.storeValueInX(_xv, _hv, self.__name)
272 if returnSerieAsArrayMatrix:
273 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
275 if argsAsSerie: return _HxValue # noqa: E701
276 else: return _HxValue[-1] # noqa: E241,E272,E701
278 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
280 Permet de restituer le résultat de l'application de l'opérateur à des
281 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
282 argument devant a priori être du bon type. Si la uValue est None,
283 on suppose que l'opérateur ne s'applique qu'à xValue.
285 - paires : les arguments par paire sont :
286 - xValue : argument X adapté pour appliquer l'opérateur
287 - uValue : argument U adapté pour appliquer l'opérateur
288 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
290 if argsAsSerie: _xuValue = paires # noqa: E701
291 else: _xuValue = (paires,) # noqa: E241,E272,E701
292 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
294 if self.__Matrix is not None:
296 for paire in _xuValue:
297 _xValue, _uValue = paire
298 self.__addOneMatrixCall()
299 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
302 for paire in _xuValue:
303 _xValue, _uValue = paire
304 if _uValue is not None:
305 _xuArgs.append( paire )
307 _xuArgs.append( _xValue )
308 self.__addOneMethodCall( len(_xuArgs) )
309 if self.__extraArgs is None:
310 _HxValue = self.__Method( _xuArgs ) # Calcul MF
312 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
314 if returnSerieAsArrayMatrix:
315 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
317 if argsAsSerie: return _HxValue # noqa: E701
318 else: return _HxValue[-1] # noqa: E241,E272,E701
320 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
322 Permet de restituer le résultat de l'application de l'opérateur à une
323 série d'arguments xValue, sachant que l'opérateur est valable en
324 xNominal. Cette méthode se contente d'appliquer, son argument devant a
325 priori être du bon type. Si l'opérateur est linéaire car c'est une
326 matrice, alors il est valable en tout point nominal et xNominal peut
327 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
328 permet d'indiquer que l'argument est multi-paires.
330 - paires : les arguments par paire sont :
331 - xNominal : série d'arguments permettant de donner le point où
332 l'opérateur est construit pour être ensuite appliqué
333 - xValue : série d'arguments adaptés pour appliquer l'opérateur
334 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
336 if argsAsSerie: _nxValue = paires # noqa: E701
337 else: _nxValue = (paires,) # noqa: E241,E272,E701
338 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
340 if self.__Matrix is not None:
342 for paire in _nxValue:
343 _xNominal, _xValue = paire
344 self.__addOneMatrixCall()
345 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
347 self.__addOneMethodCall( len(_nxValue) )
348 if self.__extraArgs is None:
349 _HxValue = self.__Method( _nxValue ) # Calcul MF
351 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
353 if returnSerieAsArrayMatrix:
354 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
356 if argsAsSerie: return _HxValue # noqa: E701
357 else: return _HxValue[-1] # noqa: E241,E272,E701
359 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
361 Permet de renvoyer l'opérateur sous la forme d'une matrice
363 if self.__Matrix is not None:
364 self.__addOneMatrixCall()
365 mValue = [self.__Matrix,]
366 elif not isinstance(ValueForMethodForm, str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
369 self.__addOneMethodCall( len(ValueForMethodForm) )
370 for _vfmf in ValueForMethodForm:
371 mValue.append( self.__Method(((_vfmf, None),)) )
373 self.__addOneMethodCall()
374 mValue = self.__Method(((ValueForMethodForm, None),))
376 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
378 if argsAsSerie: return mValue # noqa: E701
379 else: return mValue[-1] # noqa: E241,E272,E701
383 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
384 la forme d'une matrice
386 if self.__Matrix is not None:
387 return self.__Matrix.shape
389 raise ValueError("Matrix form of the operator is not available, nor the shape")
391 def nbcalls(self, which=None):
393 Renvoie les nombres d'évaluations de l'opérateur
396 self.__NbCallsAsMatrix + self.__NbCallsAsMethod,
397 self.__NbCallsAsMatrix,
398 self.__NbCallsAsMethod,
399 self.__NbCallsOfCached,
400 Operator.NbCallsAsMatrix + Operator.NbCallsAsMethod,
401 Operator.NbCallsAsMatrix,
402 Operator.NbCallsAsMethod,
403 Operator.NbCallsOfCached,
405 if which is None: return __nbcalls # noqa: E701
406 else: return __nbcalls[which] # noqa: E241,E272,E701
408 def __addOneMatrixCall(self):
409 "Comptabilise un appel"
410 self.__NbCallsAsMatrix += 1 # Decompte local
411 Operator.NbCallsAsMatrix += 1 # Decompte global
413 def __addOneMethodCall(self, nb = 1):
414 "Comptabilise un appel"
415 self.__NbCallsAsMethod += nb # Decompte local
416 Operator.NbCallsAsMethod += nb # Decompte global
418 def __addOneCacheCall(self):
419 "Comptabilise un appel"
420 self.__NbCallsOfCached += 1 # Décompte local
421 Operator.NbCallsOfCached += 1 # Décompte global
423 # ==============================================================================
424 class FullOperator(object):
426 Classe générale d'interface de type opérateur complet
427 (Direct, Linéaire Tangent, Adjoint)
430 "__name", "__check", "__extraArgs", "__FO", "__T",
434 name = "GenericFullOperator",
436 asOneFunction = None, # 1 Fonction
437 asThreeFunctions = None, # 3 Fonctions in a dictionary
438 asScript = None, # 1 or 3 Fonction(s) by script
439 asDict = None, # Parameters
441 extraArguments = None,
442 performancePrf = None,
443 inputAsMF = False, # Fonction(s) as Multi-Functions
445 toBeChecked = False ):
447 self.__name = str(name)
448 self.__check = bool(toBeChecked)
449 self.__extraArgs = extraArguments
454 if (asDict is not None) and isinstance(asDict, dict):
455 __Parameters.update( asDict ) # Copie mémoire
456 # Deprecated parameters
457 __Parameters = self.__deprecateOpt(
458 collection = __Parameters,
459 oldn = "EnableMultiProcessing",
460 newn = "EnableWiseParallelism",
462 __Parameters = self.__deprecateOpt(
463 collection = __Parameters,
464 oldn = "EnableMultiProcessingInEvaluation",
465 newn = "EnableParallelEvaluations",
467 __Parameters = self.__deprecateOpt(
468 collection = __Parameters,
469 oldn = "EnableMultiProcessingInDerivatives",
470 newn = "EnableParallelDerivatives",
472 # Priorité à EnableParallelDerivatives=True
473 if "EnableWiseParallelism" in __Parameters and __Parameters["EnableWiseParallelism"]:
474 __Parameters["EnableParallelDerivatives"] = True
475 __Parameters["EnableParallelEvaluations"] = False
476 if "EnableParallelDerivatives" not in __Parameters:
477 __Parameters["EnableParallelDerivatives"] = False
478 if __Parameters["EnableParallelDerivatives"]:
479 __Parameters["EnableParallelEvaluations"] = False
480 if "EnableParallelEvaluations" not in __Parameters:
481 __Parameters["EnableParallelEvaluations"] = False
482 if "withIncrement" in __Parameters: # Temporaire
483 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
485 __reduceM, __avoidRC = True, True # Défaut
486 if performancePrf is not None:
487 if performancePrf == "ReducedAmountOfCalculation":
488 __reduceM, __avoidRC = False, True
489 elif performancePrf == "ReducedMemoryFootprint":
490 __reduceM, __avoidRC = True, False
491 elif performancePrf == "NoSavings":
492 __reduceM, __avoidRC = False, False
493 # "ReducedOverallRequirements" et tous les autres choix (y.c rien)
494 # sont équivalents au défaut
496 if asScript is not None:
497 __Matrix, __Function = None, None
499 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
501 __Function = { "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
502 __Function.update({"useApproximatedDerivatives": True})
503 __Function.update(__Parameters)
504 elif asThreeFunctions:
506 "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
507 "Tangent": Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
508 "Adjoint": Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
510 __Function.update(__Parameters)
513 if asOneFunction is not None:
514 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
515 if asOneFunction["Direct"] is not None:
516 __Function = asOneFunction
518 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
520 __Function = { "Direct": asOneFunction }
521 __Function.update({"useApproximatedDerivatives": True})
522 __Function.update(__Parameters)
523 elif asThreeFunctions is not None:
524 if isinstance(asThreeFunctions, dict) and \
525 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
526 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
527 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
528 __Function = asThreeFunctions
529 elif isinstance(asThreeFunctions, dict) and \
530 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
531 __Function = asThreeFunctions
532 __Function.update({"useApproximatedDerivatives": True})
535 "The functions has to be given in a dictionnary which have either" + \
536 " 1 key (\"Direct\") or" + \
537 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
538 if "Direct" not in asThreeFunctions:
539 __Function["Direct"] = asThreeFunctions["Tangent"]
540 __Function.update(__Parameters)
544 if appliedInX is not None and isinstance(appliedInX, dict):
545 __appliedInX = appliedInX
546 elif appliedInX is not None:
547 __appliedInX = {"HXb": appliedInX}
551 if scheduledBy is not None:
552 self.__T = scheduledBy
554 if isinstance(__Function, dict) and \
555 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
556 ("Direct" in __Function) and (__Function["Direct"] is not None):
557 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False # noqa: E272,E701
558 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01 # noqa: E272,E701
559 if "withdX" not in __Function: __Function["withdX"] = None # noqa: E272,E701
560 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM # noqa: E272,E701
561 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC # noqa: E272,E701
562 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18 # noqa: E272,E701
563 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1 # noqa: E272,E701
564 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None # noqa: E272,E701
565 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF # noqa: E272,E701
566 from daCore import NumericObjects
567 FDA = NumericObjects.FDApproximation(
569 Function = __Function["Direct"],
570 centeredDF = __Function["CenteredFiniteDifference"],
571 increment = __Function["DifferentialIncrement"],
572 dX = __Function["withdX"],
573 extraArguments = self.__extraArgs,
574 reducingMemoryUse = __Function["withReducingMemoryUse"],
575 avoidingRedundancy = __Function["withAvoidingRedundancy"],
576 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
577 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
578 mpEnabled = __Function["EnableParallelDerivatives"],
579 mpWorkers = __Function["NumberOfProcesses"],
580 mfEnabled = __Function["withmfEnabled"],
582 self.__FO["Direct"] = Operator(
584 fromMethod = FDA.DirectOperator,
585 reducingMemoryUse = __reduceM,
586 avoidingRedundancy = __avoidRC,
587 inputAsMultiFunction = inputAsMF,
588 extraArguments = self.__extraArgs,
589 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
590 self.__FO["Tangent"] = Operator(
591 name = self.__name + "Tangent",
592 fromMethod = FDA.TangentOperator,
593 reducingMemoryUse = __reduceM,
594 avoidingRedundancy = __avoidRC,
595 inputAsMultiFunction = inputAsMF,
596 extraArguments = self.__extraArgs )
597 self.__FO["Adjoint"] = Operator(
598 name = self.__name + "Adjoint",
599 fromMethod = FDA.AdjointOperator,
600 reducingMemoryUse = __reduceM,
601 avoidingRedundancy = __avoidRC,
602 inputAsMultiFunction = inputAsMF,
603 extraArguments = self.__extraArgs )
604 self.__FO["DifferentialIncrement"] = __Function["DifferentialIncrement"]
605 elif isinstance(__Function, dict) and \
606 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
607 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
608 self.__FO["Direct"] = Operator(
610 fromMethod = __Function["Direct"],
611 reducingMemoryUse = __reduceM,
612 avoidingRedundancy = __avoidRC,
613 inputAsMultiFunction = inputAsMF,
614 extraArguments = self.__extraArgs,
615 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
616 self.__FO["Tangent"] = Operator(
617 name = self.__name + "Tangent",
618 fromMethod = __Function["Tangent"],
619 reducingMemoryUse = __reduceM,
620 avoidingRedundancy = __avoidRC,
621 inputAsMultiFunction = inputAsMF,
622 extraArguments = self.__extraArgs )
623 self.__FO["Adjoint"] = Operator(
624 name = self.__name + "Adjoint",
625 fromMethod = __Function["Adjoint"],
626 reducingMemoryUse = __reduceM,
627 avoidingRedundancy = __avoidRC,
628 inputAsMultiFunction = inputAsMF,
629 extraArguments = self.__extraArgs )
630 self.__FO["DifferentialIncrement"] = None
631 elif asMatrix is not None:
632 if isinstance(__Matrix, str):
633 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
634 __matrice = numpy.asarray( __Matrix, dtype=float )
635 self.__FO["Direct"] = Operator(
637 fromMatrix = __matrice,
638 reducingMemoryUse = __reduceM,
639 avoidingRedundancy = __avoidRC,
640 inputAsMultiFunction = inputAsMF,
641 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
642 self.__FO["Tangent"] = Operator(
643 name = self.__name + "Tangent",
644 fromMatrix = __matrice,
645 reducingMemoryUse = __reduceM,
646 avoidingRedundancy = __avoidRC,
647 inputAsMultiFunction = inputAsMF )
648 self.__FO["Adjoint"] = Operator(
649 name = self.__name + "Adjoint",
650 fromMatrix = __matrice.T,
651 reducingMemoryUse = __reduceM,
652 avoidingRedundancy = __avoidRC,
653 inputAsMultiFunction = inputAsMF )
655 self.__FO["DifferentialIncrement"] = None
658 "The %s object is improperly defined or undefined,"%self.__name + \
659 " it requires at minima either a matrix, a Direct operator for" + \
660 " approximate derivatives or a Tangent/Adjoint operators pair." + \
661 " Please check your operator input.")
663 if __appliedInX is not None:
664 self.__FO["AppliedInX"] = {}
665 for key in __appliedInX:
666 if isinstance(__appliedInX[key], str):
667 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
668 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1, 1))
670 self.__FO["AppliedInX"] = None
675 def nbcalls(self, whot=None, which=None):
677 Renvoie les nombres d'évaluations de l'opérateur
680 for otype in ["Direct", "Tangent", "Adjoint"]:
681 if otype in self.__FO:
682 __nbcalls[otype] = self.__FO[otype].nbcalls()
683 if whot in __nbcalls and which is not None:
684 return __nbcalls[whot][which]
689 "x.__repr__() <==> repr(x)"
690 return repr(self.__FO)
693 "x.__str__() <==> str(x)"
694 return str(self.__FO)
696 def __deprecateOpt(self, collection: dict, oldn: str, newn: str):
697 if oldn in collection:
698 collection[newn] = collection[oldn]
700 __msg = "the parameter \"%s\" used in this case is"%(oldn,)
701 __msg += " deprecated and has to be replaced by \"%s\"."%(newn,)
702 __msg += " Please update your code."
703 warnings.warn(__msg, FutureWarning, stacklevel=50)
706 # ==============================================================================
707 class Algorithm(object):
709 Classe générale d'interface de type algorithme
711 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
712 d'assimilation, en fournissant un container (dictionnaire) de variables
713 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
715 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
718 "_name", "_parameters", "__internal_state", "__required_parameters",
719 "_m", "__variable_names_not_public", "__canonical_parameter_name",
720 "__canonical_stored_name", "__replace_by_the_new_name",
724 def __init__(self, name):
726 L'initialisation présente permet de fabriquer des variables de stockage
727 disponibles de manière générique dans les algorithmes élémentaires. Ces
728 variables de stockage sont ensuite conservées dans un dictionnaire
729 interne à l'objet, mais auquel on accède par la méthode "get".
731 Les variables prévues sont :
732 - APosterioriCorrelations : matrice de corrélations de la matrice A
733 - APosterioriCovariance : matrice de covariances a posteriori : A
734 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
735 - APosterioriVariances : vecteur des variances de la matrice A
736 - Analysis : vecteur d'analyse : Xa
737 - BMA : Background moins Analysis : Xa - Xb
738 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
739 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
740 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
741 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
742 - CostFunctionJo : partie observations de la fonction-coût : Jo
743 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
744 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
745 - CurrentOptimum : état optimal courant lors d'itérations
746 - CurrentState : état courant lors d'itérations
747 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
748 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
749 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
750 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
751 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
752 - ForecastState : état prédit courant lors d'itérations
753 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
754 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
755 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
756 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
757 - Innovation : l'innovation : d = Y - H(X)
758 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
759 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
760 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
761 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
762 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
763 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
764 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
765 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
766 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
767 - KalmanGainAtOptimum : gain de Kalman à l'optimum
768 - MahalanobisConsistency : indicateur de consistance des covariances
769 - OMA : Observation moins Analyse : Y - Xa
770 - OMB : Observation moins Background : Y - Xb
771 - ReducedCoordinates : coordonnées dans la base réduite
772 - Residu : dans le cas des algorithmes de vérification
773 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
774 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
775 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
776 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
777 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
778 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
779 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
780 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
781 - SingularValues : valeurs singulières provenant d'une décomposition SVD
782 On peut rajouter des variables à stocker dans l'initialisation de
783 l'algorithme élémentaire qui va hériter de cette classe
785 logging.debug("%s Initialisation", str(name))
786 self._m = PlatformInfo.SystemUsage()
788 self._name = str( name )
789 self._parameters = {"StoreSupplementaryCalculations": []}
790 self.__internal_state = {}
791 self.__required_parameters = {}
792 self.__required_inputs = {
793 "RequiredInputValues": {"mandatory": (), "optional": ()},
794 "AttributesTags": [],
795 "AttributesFeatures": [],
797 self.__variable_names_not_public = {"nextStep": False} # Duplication dans AlgorithmAndParameters
798 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
799 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
800 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
802 self.StoredVariables = {}
803 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
804 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
805 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
806 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
807 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
808 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
809 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
810 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
811 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
812 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
813 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
814 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
815 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
816 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
817 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
818 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
819 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
820 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrice(name = "EnsembleOfSimulations")
821 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrice(name = "EnsembleOfSnapshots")
822 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrice(name = "EnsembleOfStates")
823 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
824 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
825 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
826 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
827 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
828 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
829 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
830 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
831 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
832 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
833 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
834 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
835 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
836 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
837 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
838 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
839 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
840 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
841 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
842 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
843 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
844 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
845 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
846 self.StoredVariables["ReducedBasisMus"] = Persistence.OneVector(name = "ReducedBasisMus")
847 self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
848 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
849 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
850 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
851 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
852 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
853 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
854 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
855 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
856 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
857 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
858 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
859 self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
861 for k in self.StoredVariables:
862 self.__canonical_stored_name[k.lower()] = k
864 for k, v in self.__variable_names_not_public.items():
865 self.__canonical_parameter_name[k.lower()] = k
866 self.__canonical_parameter_name["algorithm"] = "Algorithm"
867 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
869 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
871 logging.debug("%s Lancement", self._name)
872 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
873 self._getTimeState(reset=True)
875 # Mise à jour des paramètres internes avec le contenu de Parameters, en
876 # reprenant les valeurs par défauts pour toutes celles non définies
877 self.__setParameters(Parameters, reset=True) # Copie mémoire
878 for k, v in self.__variable_names_not_public.items():
879 if k not in self._parameters:
880 self.__setParameters( {k: v} )
882 def __test_vvalue(argument, variable, argname, symbol=None):
883 "Corrections et compléments des vecteurs"
887 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
888 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name, argname, symbol))
889 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
890 logging.debug("%s %s vector %s is not set, but is optional."%(self._name, argname, symbol))
892 logging.debug("%s %s vector %s is not set, but is not required."%(self._name, argname, symbol))
894 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
896 "%s %s vector %s is required and set, and its full size is %i." \
897 % (self._name, argname, symbol, numpy.array(argument).size))
898 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
900 "%s %s vector %s is optional and set, and its full size is %i." \
901 % (self._name, argname, symbol, numpy.array(argument).size))
904 "%s %s vector %s is set although neither required nor optional, and its full size is %i." \
905 % (self._name, argname, symbol, numpy.array(argument).size))
907 __test_vvalue( Xb, "Xb", "Background or initial state" )
908 __test_vvalue( Y, "Y", "Observation" )
909 __test_vvalue( U, "U", "Control" )
911 def __test_cvalue(argument, variable, argname, symbol=None):
912 "Corrections et compléments des covariances"
916 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
917 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name, argname, symbol))
918 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
919 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name, argname, symbol))
921 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name, argname, symbol))
923 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
924 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name, argname, symbol))
925 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
926 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name, argname, symbol))
929 "%s %s error covariance matrix %s is set although neither required nor optional." \
930 % (self._name, argname, symbol))
932 __test_cvalue( B, "B", "Background" )
933 __test_cvalue( R, "R", "Observation" )
934 __test_cvalue( Q, "Q", "Evolution" )
936 def __test_ovalue(argument, variable, argname, symbol=None):
937 "Corrections et compléments des opérateurs"
940 if argument is None or (isinstance(argument, dict) and len(argument) == 0):
941 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
942 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name, argname, symbol))
943 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
944 logging.debug("%s %s operator %s is not set, but is optional."%(self._name, argname, symbol))
946 logging.debug("%s %s operator %s is not set, but is not required."%(self._name, argname, symbol))
948 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
949 logging.debug("%s %s operator %s is required and set."%(self._name, argname, symbol))
950 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
951 logging.debug("%s %s operator %s is optional and set."%(self._name, argname, symbol))
953 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name, argname, symbol))
955 __test_ovalue( HO, "HO", "Observation", "H" )
956 __test_ovalue( EM, "EM", "Evolution", "M" )
957 __test_ovalue( CM, "CM", "Control Model", "C" )
959 # Corrections et compléments des bornes
960 if ("Bounds" in self._parameters) \
961 and isinstance(self._parameters["Bounds"], (list, tuple)):
962 if (len(self._parameters["Bounds"]) > 0):
963 logging.debug("%s Bounds taken into account"%(self._name,))
965 self._parameters["Bounds"] = None
966 elif ("Bounds" in self._parameters) \
967 and isinstance(self._parameters["Bounds"], (numpy.ndarray, numpy.matrix)):
968 self._parameters["Bounds"] = numpy.ravel(self._parameters["Bounds"]).reshape((-1, 2)).tolist()
969 if (len(self._parameters["Bounds"]) > 0):
970 logging.debug("%s Bounds for states taken into account"%(self._name,))
972 self._parameters["Bounds"] = None
974 self._parameters["Bounds"] = None
975 if self._parameters["Bounds"] is None:
976 logging.debug("%s There are no bounds for states to take into account"%(self._name,))
978 if ("StateBoundsForQuantiles" in self._parameters) \
979 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
980 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
981 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
982 elif ("StateBoundsForQuantiles" in self._parameters) \
983 and isinstance(self._parameters["StateBoundsForQuantiles"], (numpy.ndarray, numpy.matrix)):
984 self._parameters["StateBoundsForQuantiles"] = numpy.ravel(self._parameters["StateBoundsForQuantiles"]).reshape((-1, 2)).tolist()
985 if (len(self._parameters["StateBoundsForQuantiles"]) > 0):
986 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
987 # Attention : contrairement à Bounds, il n'y a pas de défaut à None,
988 # sinon on ne peut pas être sans bornes
990 # Corrections et compléments de l'initialisation en X
991 if "InitializationPoint" in self._parameters:
993 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"], 'size'):
994 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
996 "Incompatible size %i of forced initial point that have to replace the background of size %i" \
997 % (self._parameters["InitializationPoint"].size, numpy.ravel(Xb).size))
998 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
1000 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
1002 if self._parameters["InitializationPoint"] is None:
1003 raise ValueError("Forced initial point can not be set without any given Background or required value")
1005 # Correction pour pallier a un bug de TNC sur le retour du Minimum
1006 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
1007 self.setParameterValue("StoreInternalVariables", True)
1009 # Verbosité et logging
1010 if logging.getLogger().level < logging.WARNING:
1011 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
1012 self._parameters["optmessages"] = 15
1014 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
1015 self._parameters["optmessages"] = 0
1019 def _post_run(self, _oH=None, _oM=None):
1021 if ("StoreSupplementaryCalculations" in self._parameters) and \
1022 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
1023 for _A in self.StoredVariables["APosterioriCovariance"]:
1024 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
1025 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
1026 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
1027 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
1028 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
1029 _EI = numpy.diag(1. / numpy.sqrt(numpy.diag(_A)))
1030 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
1031 self.StoredVariables["APosterioriCorrelations"].store( _C )
1032 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
1034 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
1035 self._name, _oH["Direct"].nbcalls(0), _oH["Tangent"].nbcalls(0), _oH["Adjoint"].nbcalls(0))
1037 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
1038 self._name, _oH["Direct"].nbcalls(3), _oH["Tangent"].nbcalls(3), _oH["Adjoint"].nbcalls(3))
1039 if _oM is not None and "Direct" in _oM and "Tangent" in _oM and "Adjoint" in _oM:
1041 "%s Nombre d'évaluation(s) de l'opérateur d'évolution direct/tangent/adjoint.: %i/%i/%i",
1042 self._name, _oM["Direct"].nbcalls(0), _oM["Tangent"].nbcalls(0), _oM["Adjoint"].nbcalls(0))
1044 "%s Nombre d'appels au cache d'opérateur d'évolution direct/tangent/adjoint..: %i/%i/%i",
1045 self._name, _oM["Direct"].nbcalls(3), _oM["Tangent"].nbcalls(3), _oM["Adjoint"].nbcalls(3))
1046 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
1047 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
1048 logging.debug("%s Terminé", self._name)
1051 def _toStore(self, key):
1052 "True if in StoreSupplementaryCalculations, else False"
1053 return key in self._parameters["StoreSupplementaryCalculations"]
1055 def get(self, key=None):
1057 Renvoie l'une des variables stockées identifiée par la clé, ou le
1058 dictionnaire de l'ensemble des variables disponibles en l'absence de
1059 clé. Ce sont directement les variables sous forme objet qui sont
1060 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1061 des classes de persistance.
1064 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1066 return self.StoredVariables
1068 def __contains__(self, key=None):
1069 "D.__contains__(k) -> True if D has a key k, else False"
1070 if key is None or key.lower() not in self.__canonical_stored_name:
1073 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
1076 "D.keys() -> list of D's keys"
1077 if hasattr(self, "StoredVariables"):
1078 return self.StoredVariables.keys()
1082 def pop(self, k, d):
1083 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
1084 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
1085 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
1090 raise TypeError("pop expected at least 1 arguments, got 0")
1091 "If key is not found, d is returned if given, otherwise KeyError is raised"
1097 def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
1099 Doit implémenter l'opération élémentaire de calcul algorithmique.
1101 raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
1103 def defineRequiredParameter(
1115 Permet de définir dans l'algorithme des paramètres requis et leurs
1116 caractéristiques par défaut.
1119 raise ValueError("A name is mandatory to define a required parameter.")
1121 self.__required_parameters[name] = {
1122 "default" : default, # noqa: E203
1123 "typecast" : typecast, # noqa: E203
1124 "minval" : minval, # noqa: E203
1125 "maxval" : maxval, # noqa: E203
1126 "listval" : listval, # noqa: E203
1127 "listadv" : listadv, # noqa: E203
1128 "message" : message, # noqa: E203
1129 "oldname" : oldname, # noqa: E203
1131 self.__canonical_parameter_name[name.lower()] = name
1132 if oldname is not None:
1133 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1134 self.__replace_by_the_new_name[oldname.lower()] = name
1135 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
1137 def getRequiredParameters(self, noDetails=True):
1139 Renvoie la liste des noms de paramètres requis ou directement le
1140 dictionnaire des paramètres requis.
1143 return sorted(self.__required_parameters.keys())
1145 return self.__required_parameters
1147 def setParameterValue(self, name=None, value=None):
1149 Renvoie la valeur d'un paramètre requis de manière contrôlée
1151 __k = self.__canonical_parameter_name[name.lower()]
1152 default = self.__required_parameters[__k]["default"]
1153 typecast = self.__required_parameters[__k]["typecast"]
1154 minval = self.__required_parameters[__k]["minval"]
1155 maxval = self.__required_parameters[__k]["maxval"]
1156 listval = self.__required_parameters[__k]["listval"]
1157 listadv = self.__required_parameters[__k]["listadv"]
1159 if value is None and default is None:
1161 elif value is None and default is not None:
1162 if typecast is None:
1165 __val = typecast( default )
1167 if typecast is None:
1171 __val = typecast( value )
1173 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
1175 if minval is not None and (numpy.array(__val, float) < minval).any():
1176 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1177 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1178 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
1179 if listval is not None or listadv is not None:
1180 if typecast is list or typecast is tuple or isinstance(__val, list) or isinstance(__val, tuple):
1182 if listval is not None and v in listval:
1184 elif listadv is not None and v in listadv:
1187 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1188 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1189 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(__val, __k, listval))
1191 if __k in ["SetSeed",]:
1196 def requireInputArguments(self, mandatory=(), optional=()):
1198 Permet d'imposer des arguments de calcul requis en entrée.
1200 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1201 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
1203 def getInputArguments(self):
1205 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1207 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
1209 def setAttributes(self, tags=(), features=()):
1211 Permet d'adjoindre des attributs comme les tags de classification.
1212 Renvoie la liste actuelle dans tous les cas.
1214 self.__required_inputs["AttributesTags"].extend( tags )
1215 self.__required_inputs["AttributesFeatures"].extend( features )
1216 return (self.__required_inputs["AttributesTags"], self.__required_inputs["AttributesFeatures"])
1218 def __setParameters(self, fromDico={}, reset=False):
1220 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1222 self._parameters.update( fromDico )
1223 __inverse_fromDico_keys = {}
1224 for k in fromDico.keys():
1225 if k.lower() in self.__canonical_parameter_name:
1226 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1227 # __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1228 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1230 for k in __inverse_fromDico_keys.values():
1231 if k.lower() in self.__replace_by_the_new_name:
1232 __newk = self.__replace_by_the_new_name[k.lower()]
1233 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k, self._name, __newk)
1234 __msg += " Please update your code."
1235 warnings.warn(__msg, FutureWarning, stacklevel=50)
1237 for k in self.__required_parameters.keys():
1238 if k in __canonic_fromDico_keys:
1239 self._parameters[k] = self.setParameterValue(k, fromDico[__inverse_fromDico_keys[k]])
1241 self._parameters[k] = self.setParameterValue(k)
1244 if hasattr(self._parameters[k], "size") and self._parameters[k].size > 100:
1245 logging.debug("%s %s d'une taille totale de %s", self._name, self.__required_parameters[k]["message"], self._parameters[k].size)
1246 elif hasattr(self._parameters[k], "__len__") and len(self._parameters[k]) > 100:
1247 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1249 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1251 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1253 Permet de stocker des variables nommées constituant l'état interne
1255 if reset: # Vide le dictionnaire préalablement
1256 self.__internal_state = {}
1257 if key is not None and value is not None:
1258 self.__internal_state[key] = value
1259 self.__internal_state.update( dict(fromDico) )
1261 def _getInternalState(self, key=None):
1263 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1265 if key is not None and key in self.__internal_state:
1266 return self.__internal_state[key]
1268 return self.__internal_state
1270 def _getTimeState(self, reset=False):
1272 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1275 self.__initial_cpu_time = time.process_time()
1276 self.__initial_elapsed_time = time.perf_counter()
1279 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1280 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1281 return self.__cpu_time, self.__elapsed_time
1283 def _StopOnTimeLimit(self, X=None, withReason=False):
1284 "Stop criteria on time limit: True/False [+ Reason]"
1285 c, e = self._getTimeState()
1286 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1287 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1288 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1289 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1291 __SC, __SR = False, ""
1297 # ==============================================================================
1298 class PartialAlgorithm(object):
1300 Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1301 action avancée comme la vérification . Pour les méthodes reprises ici,
1302 le fonctionnement est identique à celles de la classe "Algorithm".
1305 "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
1308 def __init__(self, name):
1309 self._name = str( name )
1310 self._parameters = {"StoreSupplementaryCalculations": []}
1312 self.StoredVariables = {}
1313 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1314 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1315 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1316 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1317 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1318 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
1320 self.__canonical_stored_name = {}
1321 for k in self.StoredVariables:
1322 self.__canonical_stored_name[k.lower()] = k
1324 def _toStore(self, key):
1325 "True if in StoreSupplementaryCalculations, else False"
1326 return key in self._parameters["StoreSupplementaryCalculations"]
1328 def get(self, key=None):
1330 Renvoie l'une des variables stockées identifiée par la clé, ou le
1331 dictionnaire de l'ensemble des variables disponibles en l'absence de
1332 clé. Ce sont directement les variables sous forme objet qui sont
1333 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1334 des classes de persistance.
1337 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1339 return self.StoredVariables
1341 # ==============================================================================
1342 class AlgorithmAndParameters(object):
1344 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1347 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1348 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1349 "__Q", "__variable_names_not_public",
1353 name = "GenericAlgorithm",
1359 self.__name = str(name)
1363 self.__algorithm = {}
1364 self.__algorithmFile = None
1365 self.__algorithmName = None
1367 self.updateParameters( asDict, asScript )
1369 if asAlgorithm is None and asScript is not None:
1370 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1372 __Algo = asAlgorithm
1374 if __Algo is not None:
1375 self.__A = str(__Algo)
1376 self.__P.update( {"Algorithm": self.__A} )
1378 self.__setAlgorithm( self.__A )
1380 self.__variable_names_not_public = {"nextStep": False} # Duplication dans Algorithm
1382 def updateParameters(self, asDict = None, asScript = None ):
1383 "Mise à jour des paramètres"
1384 if asDict is None and asScript is not None:
1385 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1389 if __Dict is not None:
1390 self.__P.update( dict(__Dict) )
1392 def executePythonScheme(self, asDictAO = None):
1393 "Permet de lancer le calcul d'assimilation"
1394 Operator.CM.clearCache()
1396 if not isinstance(asDictAO, dict):
1397 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1398 if hasattr(asDictAO["Background"], "getO"): self.__Xb = asDictAO["Background"].getO() # noqa: E241,E701
1399 elif hasattr(asDictAO["CheckingPoint"], "getO"): self.__Xb = asDictAO["CheckingPoint"].getO() # noqa: E241,E701
1400 else: self.__Xb = None # noqa: E241,E701
1401 if hasattr(asDictAO["Observation"], "getO"): self.__Y = asDictAO["Observation"].getO() # noqa: E241,E701
1402 else: self.__Y = asDictAO["Observation"] # noqa: E241,E701
1403 if hasattr(asDictAO["ControlInput"], "getO"): self.__U = asDictAO["ControlInput"].getO() # noqa: E241,E701
1404 else: self.__U = asDictAO["ControlInput"] # noqa: E241,E701
1405 if hasattr(asDictAO["ObservationOperator"], "getO"): self.__HO = asDictAO["ObservationOperator"].getO() # noqa: E241,E701
1406 else: self.__HO = asDictAO["ObservationOperator"] # noqa: E241,E701
1407 if hasattr(asDictAO["EvolutionModel"], "getO"): self.__EM = asDictAO["EvolutionModel"].getO() # noqa: E241,E701
1408 else: self.__EM = asDictAO["EvolutionModel"] # noqa: E241,E701
1409 if hasattr(asDictAO["ControlModel"], "getO"): self.__CM = asDictAO["ControlModel"].getO() # noqa: E241,E701
1410 else: self.__CM = asDictAO["ControlModel"] # noqa: E241,E701
1411 self.__B = asDictAO["BackgroundError"]
1412 self.__R = asDictAO["ObservationError"]
1413 self.__Q = asDictAO["EvolutionError"]
1415 self.__shape_validate()
1417 self.__algorithm.run(
1427 Parameters = self.__P,
1431 def executeYACSScheme(self, FileName=None):
1432 "Permet de lancer le calcul d'assimilation"
1433 if FileName is None or not os.path.exists(FileName):
1434 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1436 __file = os.path.abspath(FileName)
1437 logging.debug("The YACS file name is \"%s\"."%__file)
1438 if not PlatformInfo.has_salome or \
1439 not PlatformInfo.has_yacs or \
1440 not PlatformInfo.has_adao:
1443 "Unable to get SALOME, YACS or ADAO environnement variables.\n" + \
1444 "Please load the right environnement before trying to use it.\n" )
1447 import SALOMERuntime
1449 SALOMERuntime.RuntimeSALOME_setRuntime()
1451 r = pilot.getRuntime()
1452 xmlLoader = loader.YACSLoader()
1453 xmlLoader.registerProcCataLoader()
1455 catalogAd = r.loadCatalog("proc", __file)
1456 r.addCatalog(catalogAd)
1461 p = xmlLoader.load(__file)
1462 except IOError as ex:
1463 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1465 logger = p.getLogger("parser")
1466 if not logger.isEmpty():
1467 print("The imported YACS XML schema has errors on parsing:")
1468 print(logger.getStr())
1471 print("The YACS XML schema is not valid and will not be executed:")
1472 print(p.getErrorReport())
1474 info = pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1475 p.checkConsistency(info)
1476 if info.areWarningsOrErrors():
1477 print("The YACS XML schema is not coherent and will not be executed:")
1478 print(info.getGlobalRepr())
1480 e = pilot.ExecutorSwig()
1482 if p.getEffectiveState() != pilot.DONE:
1483 print(p.getErrorReport())
1487 def get(self, key = None):
1488 "Vérifie l'existence d'une clé de variable ou de paramètres"
1489 if key in self.__algorithm:
1490 return self.__algorithm.get( key )
1491 elif key in self.__P:
1492 return self.__P[key]
1494 allvariables = self.__P
1495 for k in self.__variable_names_not_public:
1496 allvariables.pop(k, None)
1499 def pop(self, k, d):
1500 "Necessaire pour le pickling"
1501 return self.__algorithm.pop(k, d)
1503 def getAlgorithmRequiredParameters(self, noDetails=True):
1504 "Renvoie la liste des paramètres requis selon l'algorithme"
1505 return self.__algorithm.getRequiredParameters(noDetails)
1507 def getAlgorithmInputArguments(self):
1508 "Renvoie la liste des entrées requises selon l'algorithme"
1509 return self.__algorithm.getInputArguments()
1511 def getAlgorithmAttributes(self):
1512 "Renvoie la liste des attributs selon l'algorithme"
1513 return self.__algorithm.setAttributes()
1515 def setObserver(self, __V, __O, __I, __A, __S):
1516 "Associe un observer à une variable unique"
1517 if self.__algorithm is None \
1518 or isinstance(self.__algorithm, dict) \
1519 or not hasattr(self.__algorithm, "StoredVariables"):
1520 raise ValueError("No observer can be build before choosing an algorithm.")
1521 if __V not in self.__algorithm:
1522 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1524 self.__algorithm.StoredVariables[ __V ].setDataObserver( HookFunction = __O, HookParameters = __I, Scheduler = __S )
1526 def setCrossObserver(self, __V, __O, __I, __A, __S):
1527 "Associe un observer à une collection ordonnée de variables"
1528 if self.__algorithm is None \
1529 or isinstance(self.__algorithm, dict) \
1530 or not hasattr(self.__algorithm, "StoredVariables"):
1531 raise ValueError("No observer can be build before choosing an algorithm.")
1532 if not isinstance(__V, (list, tuple)):
1533 raise ValueError("A cross observer requires to be set on a variable series which is not the case of %s."%__V)
1534 if len(__V) != len(__I):
1535 raise ValueError("The number of information fields has to be the same than the number of variables on which to set the observer.")
1538 if __eV not in self.__algorithm:
1539 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__eV)
1541 self.__algorithm.StoredVariables[ __eV ].setDataObserver( HookFunction = __O, HookParameters = __I, Scheduler = __S, Order = __V, OSync = __A, DOVar = self.__algorithm.StoredVariables )
1543 def removeObserver(self, __V, __O, __A = False):
1544 if self.__algorithm is None \
1545 or isinstance(self.__algorithm, dict) \
1546 or not hasattr(self.__algorithm, "StoredVariables"):
1547 raise ValueError("No observer can be removed before choosing an algorithm.")
1548 if __V not in self.__algorithm:
1549 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1551 return self.__algorithm.StoredVariables[ __V ].removeDataObserver( HookFunction = __O, AllObservers = __A )
1553 def hasObserver(self, __V):
1554 if self.__algorithm is None \
1555 or isinstance(self.__algorithm, dict) \
1556 or not hasattr(self.__algorithm, "StoredVariables"):
1558 if __V not in self.__algorithm:
1560 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1563 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1564 for k in self.__variable_names_not_public:
1565 if k in __allvariables:
1566 __allvariables.remove(k)
1567 return __allvariables
1569 def __contains__(self, key=None):
1570 "D.__contains__(k) -> True if D has a key k, else False"
1571 return key in self.__algorithm or key in self.__P
1574 "x.__repr__() <==> repr(x)"
1575 return repr(self.__A) + ", " + repr(self.__P)
1578 "x.__str__() <==> str(x)"
1579 return str(self.__A) + ", " + str(self.__P)
1581 def __setAlgorithm(self, choice = None ):
1583 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1584 d'assimilation. L'argument est un champ caractère se rapportant au nom
1585 d'un algorithme réalisant l'opération sur les arguments fixes.
1588 raise ValueError("Error: algorithm choice has to be given")
1589 if self.__algorithmName is not None:
1590 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1591 daDirectory = "daAlgorithms"
1593 # Recherche explicitement le fichier complet
1594 # ------------------------------------------
1596 for directory in sys.path:
1597 if os.path.isfile(os.path.join(directory, daDirectory, str(choice) + '.py')):
1598 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1599 if module_path is None:
1601 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1603 # Importe le fichier complet comme un module
1604 # ------------------------------------------
1606 sys_path_tmp = sys.path
1607 sys.path.insert(0, module_path)
1608 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1609 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1610 raise ImportError("this module does not define a valid elementary algorithm.")
1611 self.__algorithmName = str(choice)
1612 sys.path = sys_path_tmp
1614 except ImportError as e:
1616 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice, e))
1618 # Instancie un objet du type élémentaire du fichier
1619 # -------------------------------------------------
1620 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1623 def __shape_validate(self):
1625 Validation de la correspondance correcte des tailles des variables et
1626 des matrices s'il y en a.
1628 if self.__Xb is None: __Xb_shape = (0,) # noqa: E241,E701
1629 elif hasattr(self.__Xb, "size"): __Xb_shape = (self.__Xb.size,) # noqa: E241,E701
1630 elif hasattr(self.__Xb, "shape"):
1631 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape # noqa: E241,E701
1632 else: __Xb_shape = self.__Xb.shape() # noqa: E241,E701
1633 else: raise TypeError("The background (Xb) has no attribute of shape: problem !") # noqa: E701
1635 if self.__Y is None: __Y_shape = (0,) # noqa: E241,E701
1636 elif hasattr(self.__Y, "size"): __Y_shape = (self.__Y.size,) # noqa: E241,E701
1637 elif hasattr(self.__Y, "shape"):
1638 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape # noqa: E241,E701
1639 else: __Y_shape = self.__Y.shape() # noqa: E241,E701
1640 else: raise TypeError("The observation (Y) has no attribute of shape: problem !") # noqa: E701
1642 if self.__U is None: __U_shape = (0,) # noqa: E241,E701
1643 elif hasattr(self.__U, "size"): __U_shape = (self.__U.size,) # noqa: E241,E701
1644 elif hasattr(self.__U, "shape"):
1645 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape # noqa: E241,E701
1646 else: __U_shape = self.__U.shape() # noqa: E241,E701
1647 else: raise TypeError("The control (U) has no attribute of shape: problem !") # noqa: E701
1649 if self.__B is None: __B_shape = (0, 0) # noqa: E241,E701
1650 elif hasattr(self.__B, "shape"):
1651 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape # noqa: E241,E701
1652 else: __B_shape = self.__B.shape() # noqa: E241,E701
1653 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !") # noqa: E701
1655 if self.__R is None: __R_shape = (0, 0) # noqa: E241,E701
1656 elif hasattr(self.__R, "shape"):
1657 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape # noqa: E241,E701
1658 else: __R_shape = self.__R.shape() # noqa: E241,E701
1659 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !") # noqa: E701
1661 if self.__Q is None: __Q_shape = (0, 0) # noqa: E241,E701
1662 elif hasattr(self.__Q, "shape"):
1663 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape # noqa: E241,E701
1664 else: __Q_shape = self.__Q.shape() # noqa: E241,E701
1665 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !") # noqa: E701
1667 if len(self.__HO) == 0: __HO_shape = (0, 0) # noqa: E241,E701
1668 elif isinstance(self.__HO, dict): __HO_shape = (0, 0) # noqa: E241,E701
1669 elif hasattr(self.__HO["Direct"], "shape"):
1670 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape # noqa: E241,E701
1671 else: __HO_shape = self.__HO["Direct"].shape() # noqa: E241,E701
1672 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !") # noqa: E701
1674 if len(self.__EM) == 0: __EM_shape = (0, 0) # noqa: E241,E701
1675 elif isinstance(self.__EM, dict): __EM_shape = (0, 0) # noqa: E241,E701
1676 elif hasattr(self.__EM["Direct"], "shape"):
1677 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape # noqa: E241,E701
1678 else: __EM_shape = self.__EM["Direct"].shape() # noqa: E241,E701
1679 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !") # noqa: E241,E70
1681 if len(self.__CM) == 0: __CM_shape = (0, 0) # noqa: E241,E701
1682 elif isinstance(self.__CM, dict): __CM_shape = (0, 0) # noqa: E241,E701
1683 elif hasattr(self.__CM["Direct"], "shape"):
1684 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape # noqa: E241,E701
1685 else: __CM_shape = self.__CM["Direct"].shape() # noqa: E241,E701
1686 else: raise TypeError("The control model (CM) has no attribute of shape: problem !") # noqa: E701
1688 # Vérification des conditions
1689 # ---------------------------
1690 if not ( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1691 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1692 if not ( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1693 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1695 if not ( min(__B_shape) == max(__B_shape) ):
1696 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1697 if not ( min(__R_shape) == max(__R_shape) ):
1698 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1699 if not ( min(__Q_shape) == max(__Q_shape) ):
1700 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1701 if not ( min(__EM_shape) == max(__EM_shape) ):
1702 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1704 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[1] == max(__Xb_shape) ):
1706 "Shape characteristic of observation operator (H)" + \
1707 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape, __Xb_shape))
1708 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[0] == max(__Y_shape) ):
1710 "Shape characteristic of observation operator (H)" + \
1711 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape, __Y_shape))
1712 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not ( __HO_shape[1] == __B_shape[0] ):
1714 "Shape characteristic of observation operator (H)" + \
1715 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape, __B_shape))
1716 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not ( __HO_shape[0] == __R_shape[1] ):
1718 "Shape characteristic of observation operator (H)" + \
1719 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape, __R_shape))
1721 if self.__B is not None and len(self.__B) > 0 and not ( __B_shape[1] == max(__Xb_shape) ):
1722 if self.__algorithmName in ["EnsembleBlue",]:
1723 asPersistentVector = self.__Xb.reshape((-1, min(__B_shape)))
1724 self.__Xb = Persistence.OneVector("Background")
1725 for member in asPersistentVector:
1726 self.__Xb.store( numpy.asarray(member, dtype=float) )
1727 __Xb_shape = min(__B_shape)
1730 "Shape characteristic of a priori errors covariance matrix (B)" + \
1731 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape, __Xb_shape))
1733 if self.__R is not None and len(self.__R) > 0 and not ( __R_shape[1] == max(__Y_shape) ):
1735 "Shape characteristic of observation errors covariance matrix (R)" + \
1736 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape, __Y_shape))
1738 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not ( __EM_shape[1] == max(__Xb_shape) ):
1740 "Shape characteristic of evolution model (EM)" + \
1741 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape, __Xb_shape))
1743 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not ( __CM_shape[1] == max(__U_shape) ):
1745 "Shape characteristic of control model (CM)" + \
1746 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape, __U_shape))
1748 if ("Bounds" in self.__P) \
1749 and isinstance(self.__P["Bounds"], (list, tuple)) \
1750 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1751 if len(self.__P["Bounds"]) > 0:
1752 raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
1753 % (len(self.__P["Bounds"]), max(__Xb_shape)))
1755 self.__P["Bounds"] = None
1756 if ("Bounds" in self.__P) \
1757 and isinstance(self.__P["Bounds"], (numpy.ndarray, numpy.matrix)) \
1758 and (self.__P["Bounds"].shape[0] != max(__Xb_shape)):
1759 if self.__P["Bounds"].size > 0:
1760 raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
1761 % (self.__P["Bounds"].shape[0], max(__Xb_shape)))
1763 self.__P["Bounds"] = None
1765 if ("BoxBounds" in self.__P) \
1766 and isinstance(self.__P["BoxBounds"], (list, tuple)) \
1767 and (len(self.__P["BoxBounds"]) != max(__Xb_shape)):
1768 raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
1769 % (len(self.__P["BoxBounds"]), max(__Xb_shape)))
1770 if ("BoxBounds" in self.__P) \
1771 and isinstance(self.__P["BoxBounds"], (numpy.ndarray, numpy.matrix)) \
1772 and (self.__P["BoxBounds"].shape[0] != max(__Xb_shape)):
1773 raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
1774 % (self.__P["BoxBounds"].shape[0], max(__Xb_shape)))
1776 if ("StateBoundsForQuantiles" in self.__P) \
1777 and isinstance(self.__P["StateBoundsForQuantiles"], (list, tuple)) \
1778 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1779 raise ValueError("The number '%s' of bound pairs for the quantile state components is different from the size '%s' of the state (X) itself." \
1780 % (len(self.__P["StateBoundsForQuantiles"]), max(__Xb_shape)))
1784 # ==============================================================================
1785 class RegulationAndParameters(object):
1787 Classe générale d'interface d'action pour la régulation et ses paramètres
1789 __slots__ = ("__name", "__P")
1792 name = "GenericRegulation",
1798 self.__name = str(name)
1801 if asAlgorithm is None and asScript is not None:
1802 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1804 __Algo = asAlgorithm
1806 if asDict is None and asScript is not None:
1807 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1811 if __Dict is not None:
1812 self.__P.update( dict(__Dict) )
1814 if __Algo is not None:
1815 self.__P.update( {"Algorithm": str(__Algo)} )
1817 def get(self, key = None):
1818 "Vérifie l'existence d'une clé de variable ou de paramètres"
1820 return self.__P[key]
1824 # ==============================================================================
1825 class DataObserver(object):
1827 Classe générale d'interface de type observer
1829 __slots__ = ("__name", "__V", "__O", "__I")
1832 name = "GenericObserver",
1845 self.__name = str(name)
1850 if onVariable is None:
1851 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1852 elif isinstance(onVariable, (tuple, list)):
1853 self.__V = tuple(map( str, onVariable ))
1854 if withInfo is None:
1856 elif crossObs or isinstance(withInfo, (tuple, list)):
1859 self.__I = (str(withInfo),) * len(self.__V)
1860 elif isinstance(onVariable, str):
1861 self.__V = (onVariable,)
1862 if withInfo is None:
1863 self.__I = (onVariable,)
1865 self.__I = (str(withInfo),)
1867 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1869 if asObsObject is not None:
1870 self.__O = asObsObject
1872 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1873 __Function = Observer2Func(__FunctionText)
1874 self.__O = __Function.getfunc()
1876 for k in range(len(self.__V)):
1877 if self.__V[k] not in withAlgo:
1878 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%self.__V[k])
1881 withAlgo.setCrossObserver(self.__V, self.__O, self.__I, syncObs, scheduledBy)
1883 for k in range(len(self.__V)):
1884 withAlgo.setObserver(self.__V[k], self.__O, self.__I[k], syncObs, scheduledBy)
1887 "x.__repr__() <==> repr(x)"
1888 return repr(self.__V) + "\n" + repr(self.__O)
1891 "x.__str__() <==> str(x)"
1892 return str(self.__V) + "\n" + str(self.__O)
1894 # ==============================================================================
1895 class UserScript(object):
1897 Classe générale d'interface de type texte de script utilisateur
1899 __slots__ = ("__name", "__F")
1902 name = "GenericUserScript",
1908 self.__name = str(name)
1910 if asString is not None:
1912 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1913 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1914 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1915 self.__F = Templates.ObserverTemplates[asTemplate]
1916 elif asScript is not None:
1917 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1922 "x.__repr__() <==> repr(x)"
1923 return repr(self.__F)
1926 "x.__str__() <==> str(x)"
1927 return str(self.__F)
1929 # ==============================================================================
1930 class ExternalParameters(object):
1932 Classe générale d'interface pour le stockage des paramètres externes
1934 __slots__ = ("__name", "__P")
1937 name = "GenericExternalParameters",
1942 self.__name = str(name)
1945 self.updateParameters( asDict, asScript )
1947 def updateParameters(self, asDict = None, asScript = None ):
1948 "Mise à jour des paramètres"
1949 if asDict is None and asScript is not None:
1950 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1954 if __Dict is not None:
1955 self.__P.update( dict(__Dict) )
1957 def get(self, key = None):
1959 return self.__P[key]
1961 return list(self.__P.keys())
1964 return list(self.__P.keys())
1966 def pop(self, k, d):
1967 return self.__P.pop(k, d)
1970 return self.__P.items()
1972 def __contains__(self, key=None):
1973 "D.__contains__(k) -> True if D has a key k, else False"
1974 return key in self.__P
1976 # ==============================================================================
1977 class State(object):
1979 Classe générale d'interface de type état
1982 "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1987 name = "GenericVector",
1989 asPersistentVector = None,
1995 toBeChecked = False ):
1997 Permet de définir un vecteur :
1998 - asVector : entrée des données, comme un vecteur compatible avec le
1999 constructeur de numpy.matrix, ou "True" si entrée par script.
2000 - asPersistentVector : entrée des données, comme une série de vecteurs
2001 compatible avec le constructeur de numpy.matrix, ou comme un objet de
2002 type Persistence, ou "True" si entrée par script.
2003 - asScript : si un script valide est donné contenant une variable
2004 nommée "name", la variable est de type "asVector" (par défaut) ou
2005 "asPersistentVector" selon que l'une de ces variables est placée à
2007 - asDataFile : si un ou plusieurs fichiers valides sont donnés
2008 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
2009 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
2010 nommée "name"), on récupère les colonnes et on les range ligne après
2011 ligne (colMajor=False, par défaut) ou colonne après colonne
2012 (colMajor=True). La variable résultante est de type "asVector" (par
2013 défaut) ou "asPersistentVector" selon que l'une de ces variables est
2016 self.__name = str(name)
2017 self.__check = bool(toBeChecked)
2021 self.__is_vector = False
2022 self.__is_series = False
2024 if asScript is not None:
2025 __Vector, __Series = None, None
2026 if asPersistentVector:
2027 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2029 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2030 elif asDataFile is not None:
2031 __Vector, __Series = None, None
2032 if asPersistentVector:
2033 if colNames is not None:
2034 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
2036 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
2037 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
2038 __Series = numpy.transpose(__Series)
2039 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
2040 __Series = numpy.transpose(__Series)
2042 if colNames is not None:
2043 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
2045 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
2047 __Vector = numpy.ravel(__Vector, order = "F")
2049 __Vector = numpy.ravel(__Vector, order = "C")
2051 __Vector, __Series = asVector, asPersistentVector
2053 if __Vector is not None:
2054 self.__is_vector = True
2055 if isinstance(__Vector, str):
2056 __Vector = PlatformInfo.strvect2liststr( __Vector )
2057 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1, 1))
2058 self.shape = self.__V.shape
2059 self.size = self.__V.size
2060 elif __Series is not None:
2061 self.__is_series = True
2062 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
2063 self.__V = Persistence.OneVector(self.__name)
2064 if isinstance(__Series, str):
2065 __Series = PlatformInfo.strmatrix2liststr(__Series)
2066 for member in __Series:
2067 if isinstance(member, str):
2068 member = PlatformInfo.strvect2liststr( member )
2069 self.__V.store(numpy.asarray( member, dtype=float ))
2072 if isinstance(self.__V.shape, (tuple, list)):
2073 self.shape = self.__V.shape
2075 self.shape = self.__V.shape()
2076 if len(self.shape) == 1:
2077 self.shape = (self.shape[0], 1)
2078 self.size = self.shape[0] * self.shape[1]
2081 "The %s object is improperly defined or undefined,"%self.__name + \
2082 " it requires at minima either a vector, a list/tuple of" + \
2083 " vectors or a persistent object. Please check your vector input.")
2085 if scheduledBy is not None:
2086 self.__T = scheduledBy
2088 def getO(self, withScheduler=False):
2090 return self.__V, self.__T
2091 elif self.__T is None:
2097 "Vérification du type interne"
2098 return self.__is_vector
2101 "Vérification du type interne"
2102 return self.__is_series
2105 "x.__repr__() <==> repr(x)"
2106 return repr(self.__V)
2109 "x.__str__() <==> str(x)"
2110 return str(self.__V)
2112 # ==============================================================================
2113 class Covariance(object):
2115 Classe générale d'interface de type covariance
2118 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
2119 "__is_object", "shape", "size",
2123 name = "GenericCovariance",
2124 asCovariance = None,
2125 asEyeByScalar = None,
2126 asEyeByVector = None,
2129 toBeChecked = False ):
2131 Permet de définir une covariance :
2132 - asCovariance : entrée des données, comme une matrice compatible avec
2133 le constructeur de numpy.matrix
2134 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
2135 multiplicatif d'une matrice de corrélation identité, aucune matrice
2136 n'étant donc explicitement à donner
2137 - asEyeByVector : entrée des données comme un seul vecteur de variance,
2138 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
2139 n'étant donc explicitement à donner
2140 - asCovObject : entrée des données comme un objet python, qui a les
2141 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
2142 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
2143 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2144 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
2145 pleine doit être vérifié
2147 self.__name = str(name)
2148 self.__check = bool(toBeChecked)
2151 self.__is_scalar = False
2152 self.__is_vector = False
2153 self.__is_matrix = False
2154 self.__is_object = False
2156 if asScript is not None:
2157 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2159 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2161 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2163 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2165 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2167 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
2169 if __Scalar is not None:
2170 if isinstance(__Scalar, str):
2171 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2172 if len(__Scalar) > 0:
2173 __Scalar = __Scalar[0]
2174 if numpy.array(__Scalar).size != 1:
2176 " The diagonal multiplier given to define a sparse matrix is" + \
2177 " not a unique scalar value.\n Its actual measured size is" + \
2178 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2179 self.__is_scalar = True
2180 self.__C = numpy.abs( float(__Scalar) )
2183 elif __Vector is not None:
2184 if isinstance(__Vector, str):
2185 __Vector = PlatformInfo.strvect2liststr( __Vector )
2186 self.__is_vector = True
2187 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2188 self.shape = (self.__C.size, self.__C.size)
2189 self.size = self.__C.size**2
2190 elif __Matrix is not None:
2191 self.__is_matrix = True
2192 self.__C = numpy.matrix( __Matrix, float )
2193 self.shape = self.__C.shape
2194 self.size = self.__C.size
2195 elif __Object is not None:
2196 self.__is_object = True
2198 for at in ("getT", "getI", "diag", "trace", "__add__", "__sub__", "__neg__", "__matmul__", "__mul__", "__rmatmul__", "__rmul__"):
2199 if not hasattr(self.__C, at):
2200 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name, at))
2201 if hasattr(self.__C, "shape"):
2202 self.shape = self.__C.shape
2205 if hasattr(self.__C, "size"):
2206 self.size = self.__C.size
2214 def __validate(self):
2216 if self.__C is None:
2217 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2218 if self.ismatrix() and min(self.shape) != max(self.shape):
2219 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name, self.shape))
2220 if self.isobject() and min(self.shape) != max(self.shape):
2221 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name, self.shape))
2222 if self.isscalar() and self.__C <= 0:
2223 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name, self.__C))
2224 if self.isvector() and (self.__C <= 0).any():
2225 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
2226 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2228 numpy.linalg.cholesky( self.__C )
2230 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2231 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2235 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2238 "Vérification du type interne"
2239 return self.__is_scalar
2242 "Vérification du type interne"
2243 return self.__is_vector
2246 "Vérification du type interne"
2247 return self.__is_matrix
2250 "Vérification du type interne"
2251 return self.__is_object
2256 return Covariance(self.__name + "I", asCovariance = numpy.linalg.inv(self.__C) )
2257 elif self.isvector():
2258 return Covariance(self.__name + "I", asEyeByVector = 1. / self.__C )
2259 elif self.isscalar():
2260 return Covariance(self.__name + "I", asEyeByScalar = 1. / self.__C )
2261 elif self.isobject() and hasattr(self.__C, "getI"):
2262 return Covariance(self.__name + "I", asCovObject = self.__C.getI() )
2264 return None # Indispensable
2269 return Covariance(self.__name + "T", asCovariance = self.__C.T )
2270 elif self.isvector():
2271 return Covariance(self.__name + "T", asEyeByVector = self.__C )
2272 elif self.isscalar():
2273 return Covariance(self.__name + "T", asEyeByScalar = self.__C )
2274 elif self.isobject() and hasattr(self.__C, "getT"):
2275 return Covariance(self.__name + "T", asCovObject = self.__C.getT() )
2277 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2280 "Décomposition de Cholesky"
2282 return Covariance(self.__name + "C", asCovariance = numpy.linalg.cholesky(self.__C) )
2283 elif self.isvector():
2284 return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
2285 elif self.isscalar():
2286 return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
2287 elif self.isobject() and hasattr(self.__C, "cholesky"):
2288 return Covariance(self.__name + "C", asCovObject = self.__C.cholesky() )
2290 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2292 def choleskyI(self):
2293 "Inversion de la décomposition de Cholesky"
2295 return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2296 elif self.isvector():
2297 return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2298 elif self.isscalar():
2299 return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2300 elif self.isobject() and hasattr(self.__C, "choleskyI"):
2301 return Covariance(self.__name + "H", asCovObject = self.__C.choleskyI() )
2303 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2306 "Racine carrée matricielle"
2309 return Covariance(self.__name + "C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2310 elif self.isvector():
2311 return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
2312 elif self.isscalar():
2313 return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
2314 elif self.isobject() and hasattr(self.__C, "sqrtm"):
2315 return Covariance(self.__name + "C", asCovObject = self.__C.sqrtm() )
2317 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2320 "Inversion de la racine carrée matricielle"
2323 return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2324 elif self.isvector():
2325 return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2326 elif self.isscalar():
2327 return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2328 elif self.isobject() and hasattr(self.__C, "sqrtmI"):
2329 return Covariance(self.__name + "H", asCovObject = self.__C.sqrtmI() )
2331 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2333 def diag(self, msize=None):
2334 "Diagonale de la matrice"
2336 return numpy.diag(self.__C)
2337 elif self.isvector():
2339 elif self.isscalar():
2341 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2343 return self.__C * numpy.ones(int(msize))
2344 elif self.isobject() and hasattr(self.__C, "diag"):
2345 return self.__C.diag()
2347 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2349 def trace(self, msize=None):
2350 "Trace de la matrice"
2352 return numpy.trace(self.__C)
2353 elif self.isvector():
2354 return float(numpy.sum(self.__C))
2355 elif self.isscalar():
2357 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2359 return self.__C * int(msize)
2360 elif self.isobject():
2361 return self.__C.trace()
2363 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2365 def asfullmatrix(self, msize=None):
2368 return numpy.asarray(self.__C, dtype=float)
2369 elif self.isvector():
2370 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2371 elif self.isscalar():
2373 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2375 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2376 elif self.isobject() and hasattr(self.__C, "asfullmatrix"):
2377 return self.__C.asfullmatrix()
2379 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2381 def assparsematrix(self):
2389 "x.__repr__() <==> repr(x)"
2390 if isinstance(self.__C, numpy.float64):
2391 return repr(float(self.__C))
2393 return repr(self.__C)
2396 "x.__str__() <==> str(x)"
2397 return str(self.__C)
2399 def __add__(self, other):
2400 "x.__add__(y) <==> x+y"
2401 if self.ismatrix() or self.isobject():
2402 return self.__C + numpy.asmatrix(other)
2403 elif self.isvector() or self.isscalar():
2404 _A = numpy.asarray(other)
2405 if len(_A.shape) == 1:
2406 _A.reshape((-1, 1))[::2] += self.__C
2408 _A.reshape(_A.size)[::_A.shape[1] + 1] += self.__C
2409 return numpy.asmatrix(_A)
2411 def __radd__(self, other):
2412 "x.__radd__(y) <==> y+x"
2413 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name, type(other)))
2415 def __sub__(self, other):
2416 "x.__sub__(y) <==> x-y"
2417 if self.ismatrix() or self.isobject():
2418 return self.__C - numpy.asmatrix(other)
2419 elif self.isvector() or self.isscalar():
2420 _A = numpy.asarray(other)
2421 _A.reshape(_A.size)[::_A.shape[1] + 1] = self.__C - _A.reshape(_A.size)[::_A.shape[1] + 1]
2422 return numpy.asmatrix(_A)
2424 def __rsub__(self, other):
2425 "x.__rsub__(y) <==> y-x"
2426 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name, type(other)))
2429 "x.__neg__() <==> -x"
2432 def __matmul__(self, other):
2433 "x.__mul__(y) <==> x@y"
2434 if self.ismatrix() and isinstance(other, (int, float)):
2435 return numpy.asarray(self.__C) * other
2436 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2437 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2438 return numpy.ravel(self.__C @ numpy.ravel(other))
2439 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2440 return numpy.asarray(self.__C) @ numpy.asarray(other)
2442 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asarray(other).shape, self.__name))
2443 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2444 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2445 return numpy.ravel(self.__C) * numpy.ravel(other)
2446 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2447 return numpy.ravel(self.__C).reshape((-1, 1)) * numpy.asarray(other)
2449 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.ravel(other).shape, self.__name))
2450 elif self.isscalar() and isinstance(other, numpy.matrix):
2451 return numpy.asarray(self.__C * other)
2452 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2453 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2454 return self.__C * numpy.ravel(other)
2456 return self.__C * numpy.asarray(other)
2457 elif self.isobject():
2458 return self.__C.__matmul__(other)
2460 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name, type(other)))
2462 def __mul__(self, other):
2463 "x.__mul__(y) <==> x*y"
2464 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2465 return self.__C * other
2466 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2467 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2468 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2469 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2470 return self.__C * numpy.asmatrix(other)
2473 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asmatrix(other).shape, self.__name))
2474 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2475 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2476 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2477 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2478 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2481 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.ravel(other).shape, self.__name))
2482 elif self.isscalar() and isinstance(other, numpy.matrix):
2483 return self.__C * other
2484 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2485 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2486 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2488 return self.__C * numpy.asmatrix(other)
2489 elif self.isobject():
2490 return self.__C.__mul__(other)
2492 raise NotImplementedError(
2493 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name, type(other)))
2495 def __rmatmul__(self, other):
2496 "x.__rmul__(y) <==> y@x"
2497 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2498 return other * self.__C
2499 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2500 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2501 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2502 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2503 return numpy.asmatrix(other) * self.__C
2506 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
2507 elif self.isvector() and isinstance(other, numpy.matrix):
2508 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2509 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2510 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2511 return numpy.asmatrix(numpy.array(other) * self.__C)
2514 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape, self.shape, self.__name))
2515 elif self.isscalar() and isinstance(other, numpy.matrix):
2516 return other * self.__C
2517 elif self.isobject():
2518 return self.__C.__rmatmul__(other)
2520 raise NotImplementedError(
2521 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name, type(other)))
2523 def __rmul__(self, other):
2524 "x.__rmul__(y) <==> y*x"
2525 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2526 return other * self.__C
2527 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2528 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2529 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2530 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2531 return numpy.asmatrix(other) * self.__C
2534 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
2535 elif self.isvector() and isinstance(other, numpy.matrix):
2536 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2537 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2538 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2539 return numpy.asmatrix(numpy.array(other) * self.__C)
2542 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape, self.shape, self.__name))
2543 elif self.isscalar() and isinstance(other, numpy.matrix):
2544 return other * self.__C
2545 elif self.isscalar() and isinstance(other, float):
2546 return other * self.__C
2547 elif self.isobject():
2548 return self.__C.__rmul__(other)
2550 raise NotImplementedError(
2551 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name, type(other)))
2554 "x.__len__() <==> len(x)"
2555 return self.shape[0]
2557 # ==============================================================================
2558 class Observer2Func(object):
2560 Création d'une fonction d'observateur a partir de son texte
2562 __slots__ = ("__corps")
2564 def __init__(self, corps=""):
2565 self.__corps = corps
2567 def func(self, var, info):
2568 "Fonction d'observation"
2572 "Restitution du pointeur de fonction dans l'objet"
2575 # ==============================================================================
2576 class CaseLogger(object):
2578 Conservation des commandes de création d'un cas
2581 "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
2585 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2586 self.__name = str(__name)
2587 self.__objname = str(__objname)
2588 self.__logSerie = []
2589 self.__switchoff = False
2591 "TUI": Interfaces._TUIViewer,
2592 "SCD": Interfaces._SCDViewer,
2593 "YACS": Interfaces._YACSViewer,
2594 "SimpleReportInRst": Interfaces._SimpleReportInRstViewer,
2595 "SimpleReportInHtml": Interfaces._SimpleReportInHtmlViewer,
2596 "SimpleReportInPlainTxt": Interfaces._SimpleReportInPlainTxtViewer,
2599 "TUI": Interfaces._TUIViewer,
2600 "COM": Interfaces._COMViewer,
2602 if __addViewers is not None:
2603 self.__viewers.update(dict(__addViewers))
2604 if __addLoaders is not None:
2605 self.__loaders.update(dict(__addLoaders))
2607 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2608 "Enregistrement d'une commande individuelle"
2609 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2610 if "self" in __keys:
2611 __keys.remove("self")
2612 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2614 self.__switchoff = True
2616 self.__switchoff = False
2618 def dump(self, __filename=None, __format="TUI", __upa=""):
2619 "Restitution normalisée des commandes"
2620 if __format in self.__viewers:
2621 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2623 raise ValueError("Dumping as \"%s\" is not available"%__format)
2624 return __formater.dump(__filename, __upa)
2626 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2627 "Chargement normalisé des commandes"
2628 if __format in self.__loaders:
2629 __formater = self.__loaders[__format]()
2631 raise ValueError("Loading as \"%s\" is not available"%__format)
2632 return __formater.load(__filename, __content, __object)
2634 # ==============================================================================
2637 _extraArguments = None,
2638 _sFunction = lambda x: x,
2640 _mpWorkers = None ):
2642 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2643 correspondante de valeurs de la fonction en argument
2645 # Vérifications et définitions initiales
2646 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2647 if not PlatformInfo.isIterable( __xserie ):
2648 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2650 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2653 __mpWorkers = int(_mpWorkers)
2655 import multiprocessing
2666 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2667 with multiprocessing.Pool(__mpWorkers) as pool:
2668 __multiHX = pool.map( _sFunction, _jobs )
2671 # logging.debug("MULTF Internal multiprocessing calculation end")
2673 # logging.debug("MULTF Internal monoprocessing calculation begin")
2675 if _extraArguments is None:
2676 for __xvalue in __xserie:
2677 __multiHX.append( _sFunction( __xvalue ) )
2678 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2679 for __xvalue in __xserie:
2680 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2681 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2682 for __xvalue in __xserie:
2683 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2685 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2686 # logging.debug("MULTF Internal monoprocessing calculation end")
2688 # logging.debug("MULTF Internal multifonction calculations end")
2691 # ==============================================================================
2692 if __name__ == "__main__":
2693 print("\n AUTODIAGNOSTIC\n")