1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2024 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence
38 from daCore import PlatformInfo
39 from daCore import Interfaces
40 from daCore import Templates
42 # ==============================================================================
43 class CacheManager(object):
# Cache of operator evaluations: keeps (previous point, previous value,
# point norm, operator name) tuples so that re-evaluating an operator at
# a numerically redundant point can return the stored result instead of
# recomputing it. NOTE(review): this listing is elided — several interior
# lines (list/flag initialisation, branch bodies) are missing here.
45 Classe générale de gestion d'un cache de calculs
# Slot names of the internal state (slot listing partially elided).
48 "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
# Constructor arguments: toleranceInRedundancy is the relative tolerance
# used to declare two points redundant; lengthOfRedundancy is the maximum
# cache length (-1 = auto-sized later from the first stored point, see
# storeValueInX).
53 toleranceInRedundancy = 1.e-18,
54 lengthOfRedundancy = -1 ):
56 Les caractéristiques de tolérance peuvent être modifiées à la création.
58 self.__tolerBP = float(toleranceInRedundancy)
59 self.__lengthOR = int(lengthOfRedundancy)
60 self.__initlnOR = self.__lengthOR
# Looks up xValue among the cached points of operator oName, scanning the
# most recent entries first. NOTE(review): the bodies of the guards below
# (continue/return statements) are elided in this listing.
70 def wasCalculatedIn(self, xValue, oName="" ):
71 "Checks whether a computation matching the given value already exists"
# Scan backwards over at most __lengthOR of the most recent entries.
75 for i in range(min(len(self.__listOPCV), self.__lengthOR) - 1, -1, -1):
# Reject candidates that cannot match: input without a 'size' attribute,
76 if not hasattr(xValue, 'size'):
# a different operator name,
78 elif (str(oName) != self.__listOPCV[i][3]):
# a different vector size,
80 elif (xValue.size != self.__listOPCV[i][0].size):
# or a first component already farther apart than the scaled tolerance.
82 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: 2-norm distance below the tolerance scaled by the cached norm.
84 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
86 __HxV = self.__listOPCV[i][1]
# Stores the result HxValue of operator oName evaluated at xValue, sizing
# the allowed cache length on first use / first new operator name, and
# evicting the oldest entries beyond that length (FIFO).
90 def storeValueInX(self, xValue, HxValue, oName="" ):
91 "Stores, for an operator o, a computation Hx matching the value x"
92 if self.__lengthOR < 0:
# Auto-size: room proportional to the input size, capped at 50 components.
93 self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
94 self.__initlnOR = self.__lengthOR
95 self.__seenNames.append(str(oName))
96 if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
97 self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
98 self.__initlnOR += self.__lengthOR
99 self.__seenNames.append(str(oName))
# Evict oldest entries until the cache fits the allowed length.
100 while len(self.__listOPCV) > self.__lengthOR:
101 self.__listOPCV.pop(0)
102 self.__listOPCV.append((
103 copy.copy(numpy.ravel(xValue)), # 0 Previous point
104 copy.copy(HxValue), # 1 Previous value
105 numpy.linalg.norm(xValue), # 2 Norm
106 str(oName), # 3 Operator name
# Disable/enable switches (method headers elided in this listing):
# disabling keeps the current length as the restore point and turns the
# cache off; enabling restores the saved length and turns it back on.
111 self.__initlnOR = self.__lengthOR
113 self.__enabled = False
117 self.__lengthOR = self.__initlnOR
118 self.__enabled = True
120 # ==============================================================================
121 class Operator(object):
# Generic interface for a simple operator, supplied either as a Python
# (multi-)function or as a matrix; every evaluation is counted (locally
# and class-wide) and, when enabled, cached through the class-level
# CacheManager (referenced below as Operator.CM). NOTE(review): this
# listing is elided — several interior lines are missing.
123 Classe générale d'interface de type opérateur simple
# Slot names of the internal state (slot listing partially elided).
126 "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
127 "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
128 "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
# Constructor (signature lines partially elided): exactly one of
# fromMethod / fromMatrix is expected to define the operator.
137 name = "GenericOperator",
140 avoidingRedundancy = True,
141 reducingMemoryUse = False,
142 inputAsMultiFunction = False,
143 enableMultiProcess = False,
144 extraArguments = None ):
146 On construit un objet de ce type en fournissant, à l'aide de l'un des
147 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
150 - name : nom d'opérateur
151 - fromMethod : argument de type fonction Python
152 - fromMatrix : argument adapté au constructeur numpy.array/matrix
153 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
154 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
156 - inputAsMultiFunction : booléen indiquant une fonction explicitement
157 définie (ou pas) en multi-fonction
158 - extraArguments : arguments supplémentaires passés à la fonction de
159 base et ses dérivées (tuple ou dictionnaire)
161 self.__name = str(name)
162 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
163 self.__reduceM = bool( reducingMemoryUse )
164 self.__avoidRC = bool( avoidingRedundancy )
165 self.__inputAsMF = bool( inputAsMultiFunction )
166 self.__mpEnabled = bool( enableMultiProcess )
167 self.__extraArgs = extraArguments
# Case 1: user supplied a function already in multi-function form.
168 if fromMethod is not None and self.__inputAsMF:
169 self.__Method = fromMethod # logtimer(fromMethod)
171 self.__Type = "Method"
# Case 2: mono-function, wrapped into a multi-function adapter
# (MultiFonction is defined elsewhere in this module).
172 elif fromMethod is not None and not self.__inputAsMF:
173 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
175 self.__Type = "Method"
# Case 3: linear operator given as a matrix (possibly as a string form
# converted through PlatformInfo).
176 elif fromMatrix is not None:
178 if isinstance(fromMatrix, str):
179 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
180 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
181 self.__Type = "Matrix"
# Globally disables the class-level evaluation cache.
187 def disableAvoidingRedundancy(self):
189 Operator.CM.disable()
# Re-enables the cache. NOTE(review): the enabling branch appears elided
# in this listing — the remaining line is presumably the fallback that
# keeps the cache off when redundancy avoidance was not requested for
# this operator; confirm against the full source.
191 def enableAvoidingRedundancy(self):
196 Operator.CM.disable()
# Applies the operator to a series of points xValue, reusing precomputed
# HValue entries or cached results when possible, and evaluating the
# remaining points via the matrix or the (multi-)function.
202 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
204 Permet de restituer le résultat de l'application de l'opérateur à une
205 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
206 argument devant a priori être du bon type.
208 - les arguments par série sont :
209 - xValue : argument adapté pour appliquer l'opérateur
210 - HValue : valeur précalculée de l'opérateur en ce point
211 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# Branch 1: precomputed values supplied — store them directly.
218 if HValue is not None:
222 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
224 if _HValue is not None:
225 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
227 for i in range(len(_HValue)):
228 _HxValue.append( _HValue[i] )
230 Operator.CM.storeValueInX(_xValue[i], _HxValue[-1], self.__name)
# Branch 2: evaluate each point, checking the cache first.
235 for i, xv in enumerate(_xValue):
237 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv, self.__name)
239 __alreadyCalculated = False
241 if __alreadyCalculated:
242 self.__addOneCacheCall()
# Linear case: apply the matrix directly to this point.
245 if self.__Matrix is not None:
246 self.__addOneMatrixCall()
247 _hv = self.__Matrix @ numpy.ravel(xv)
249 self.__addOneMethodCall()
253 _HxValue.append( _hv )
# Non-linear case: one multi-function call for all remaining points.
255 if len(_xserie) > 0 and self.__Matrix is None:
256 if self.__extraArgs is None:
257 _hserie = self.__Method( _xserie ) # MF computation
259 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
# Sanity check: a multi-function must return a sequence of results.
260 if not hasattr(_hserie, "pop"):
262 "The user input multi-function doesn't seem to return a" + \
263 " result sequence, behaving like a mono-function. It has" + \
270 Operator.CM.storeValueInX(_xv, _hv, self.__name)
# Optionally stack all results column-wise into one 2D array.
272 if returnSerieAsArrayMatrix:
273 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
275 if argsAsSerie: return _HxValue # noqa: E701
276 else: return _HxValue[-1] # noqa: E241,E272,E701
# Applies the operator to (xValue, uValue) pairs; when uValue is None the
# operator is applied to xValue alone.
278 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
280 Permet de restituer le résultat de l'application de l'opérateur à des
281 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
282 argument devant a priori être du bon type. Si la uValue est None,
283 on suppose que l'opérateur ne s'applique qu'à xValue.
285 - paires : les arguments par paire sont :
286 - xValue : argument X adapté pour appliquer l'opérateur
287 - uValue : argument U adapté pour appliquer l'opérateur
288 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
290 if argsAsSerie: _xuValue = paires # noqa: E701
291 else: _xuValue = (paires,) # noqa: E241,E272,E701
292 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Linear case: the control uValue is ignored, only X is mapped.
294 if self.__Matrix is not None:
296 for paire in _xuValue:
297 _xValue, _uValue = paire
298 self.__addOneMatrixCall()
299 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Functional case: keep the pair when a control is given, else X alone.
302 for paire in _xuValue:
303 _xValue, _uValue = paire
304 if _uValue is not None:
305 _xuArgs.append( paire )
307 _xuArgs.append( _xValue )
308 self.__addOneMethodCall( len(_xuArgs) )
309 if self.__extraArgs is None:
310 _HxValue = self.__Method( _xuArgs ) # MF computation
312 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
314 if returnSerieAsArrayMatrix:
315 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
317 if argsAsSerie: return _HxValue # noqa: E701
318 else: return _HxValue[-1] # noqa: E241,E272,E701
# Applies the operator (valid at xNominal) to xValue, for a series of
# (xNominal, xValue) pairs; for a matrix operator xNominal is irrelevant.
320 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
322 Permet de restituer le résultat de l'application de l'opérateur à une
323 série d'arguments xValue, sachant que l'opérateur est valable en
324 xNominal. Cette méthode se contente d'appliquer, son argument devant a
325 priori être du bon type. Si l'opérateur est linéaire car c'est une
326 matrice, alors il est valable en tout point nominal et xNominal peut
327 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
328 permet d'indiquer que l'argument est multi-paires.
330 - paires : les arguments par paire sont :
331 - xNominal : série d'arguments permettant de donner le point où
332 l'opérateur est construit pour être ensuite appliqué
333 - xValue : série d'arguments adaptés pour appliquer l'opérateur
334 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
336 if argsAsSerie: _nxValue = paires # noqa: E701
337 else: _nxValue = (paires,) # noqa: E241,E272,E701
338 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: apply the matrix to xValue, xNominal is unused.
340 if self.__Matrix is not None:
342 for paire in _nxValue:
343 _xNominal, _xValue = paire
344 self.__addOneMatrixCall()
345 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Functional case: a single multi-function call on all the pairs.
347 self.__addOneMethodCall( len(_nxValue) )
348 if self.__extraArgs is None:
349 _HxValue = self.__Method( _nxValue ) # MF computation
351 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
353 if returnSerieAsArrayMatrix:
354 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
356 if argsAsSerie: return _HxValue # noqa: E701
357 else: return _HxValue[-1] # noqa: E241,E272,E701
# Returns the operator in matrix form; a functional operator needs an
# operating point (the sentinel default marks "no point given").
359 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
361 Permet de renvoyer l'opérateur sous la forme d'une matrice
363 if self.__Matrix is not None:
364 self.__addOneMatrixCall()
365 mValue = [self.__Matrix,]
366 elif not isinstance(ValueForMethodForm, str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
369 self.__addOneMethodCall( len(ValueForMethodForm) )
370 for _vfmf in ValueForMethodForm:
371 mValue.append( self.__Method(((_vfmf, None),)) )
373 self.__addOneMethodCall()
374 mValue = self.__Method(((ValueForMethodForm, None),))
376 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
378 if argsAsSerie: return mValue # noqa: E701
379 else: return mValue[-1] # noqa: E241,E272,E701
# Shape accessor (method header elided in this listing): available only
# when the operator is stored as a matrix.
383 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
384 la forme d'une matrice
386 if self.__Matrix is not None:
387 return self.__Matrix.shape
389 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Returns the evaluation counters: instance totals then class-wide totals
# (total, as-matrix, as-method, from-cache), or one entry if 'which' set.
391 def nbcalls(self, which=None):
393 Renvoie les nombres d'évaluations de l'opérateur
396 self.__NbCallsAsMatrix + self.__NbCallsAsMethod,
397 self.__NbCallsAsMatrix,
398 self.__NbCallsAsMethod,
399 self.__NbCallsOfCached,
400 Operator.NbCallsAsMatrix + Operator.NbCallsAsMethod,
401 Operator.NbCallsAsMatrix,
402 Operator.NbCallsAsMethod,
403 Operator.NbCallsOfCached,
405 if which is None: return __nbcalls # noqa: E701
406 else: return __nbcalls[which] # noqa: E241,E272,E701
408 def __addOneMatrixCall(self):
409 "Counts one call"
410 self.__NbCallsAsMatrix += 1 # Local count
411 Operator.NbCallsAsMatrix += 1 # Global count
413 def __addOneMethodCall(self, nb = 1):
414 "Counts one call"
415 self.__NbCallsAsMethod += nb # Local count
416 Operator.NbCallsAsMethod += nb # Global count
418 def __addOneCacheCall(self):
419 "Counts one call"
420 self.__NbCallsOfCached += 1 # Local count
421 Operator.NbCallsOfCached += 1 # Global count
423 # ==============================================================================
424 class FullOperator(object):
# Full operator interface bundling the Direct, Tangent and Adjoint forms
# into self.__FO, built from a matrix, a single function (with finite-
# difference derivatives), or a Direct/Tangent/Adjoint function triple.
# NOTE(review): this listing is elided — several interior lines of the
# constructor are missing.
426 Classe générale d'interface de type opérateur complet
427 (Direct, Linéaire Tangent, Adjoint)
430 "__name", "__check", "__extraArgs", "__FO", "__T",
# Constructor keywords (signature lines partially elided below).
434 name = "GenericFullOperator",
436 asOneFunction = None, # 1 function
437 asThreeFunctions = None, # 3 functions in a dictionary
438 asScript = None, # 1 or 3 function(s) given by script
439 asDict = None, # Parameters
441 extraArguments = None,
442 performancePrf = None,
443 inputAsMF = False, # Function(s) as multi-functions
445 toBeChecked = False ):
447 self.__name = str(name)
448 self.__check = bool(toBeChecked)
449 self.__extraArgs = extraArguments
# Merge user parameters, then rename deprecated parameter keys.
454 if (asDict is not None) and isinstance(asDict, dict):
455 __Parameters.update( asDict )
456 # Deprecated parameters
457 __Parameters = self.__deprecateOpt(
458 collection = __Parameters,
459 oldn = "EnableMultiProcessing",
460 newn = "EnableWiseParallelism",
462 __Parameters = self.__deprecateOpt(
463 collection = __Parameters,
464 oldn = "EnableMultiProcessingInEvaluation",
465 newn = "EnableParallelEvaluations",
467 __Parameters = self.__deprecateOpt(
468 collection = __Parameters,
469 oldn = "EnableMultiProcessingInDerivatives",
470 newn = "EnableParallelDerivatives",
472 # Priority to EnableParallelDerivatives=True
473 if "EnableWiseParallelism" in __Parameters and __Parameters["EnableWiseParallelism"]:
474 __Parameters["EnableParallelDerivatives"] = True
475 __Parameters["EnableParallelEvaluations"] = False
476 if "EnableParallelDerivatives" not in __Parameters:
477 __Parameters["EnableParallelDerivatives"] = False
# Parallel derivatives and parallel evaluations are mutually exclusive.
478 if __Parameters["EnableParallelDerivatives"]:
479 __Parameters["EnableParallelEvaluations"] = False
480 if "EnableParallelEvaluations" not in __Parameters:
481 __Parameters["EnableParallelEvaluations"] = False
482 if "withIncrement" in __Parameters: # Temporary
483 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Performance profile: trade memory footprint against recomputation.
485 __reduceM, __avoidRC = True, True # Default
486 if performancePrf is not None:
487 if performancePrf == "ReducedAmountOfCalculation":
488 __reduceM, __avoidRC = False, True
489 elif performancePrf == "ReducedMemoryFootprint":
490 __reduceM, __avoidRC = True, False
491 elif performancePrf == "NoSavings":
492 __reduceM, __avoidRC = False, False
493 # "ReducedOverallRequirements" and all the other choices (incl. nothing)
494 # are equivalent to the default
# Script input: load a matrix or the operator function(s) from a script
# file through the Interfaces importer.
496 if asScript is not None:
497 __Matrix, __Function = None, None
499 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
501 __Function = { "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
502 __Function.update({"useApproximatedDerivatives": True})
503 __Function.update(__Parameters)
504 elif asThreeFunctions:
506 "Direct": Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
507 "Tangent": Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
508 "Adjoint": Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
510 __Function.update(__Parameters)
# Direct in-memory input: one function (derivatives approximated) ...
513 if asOneFunction is not None:
514 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
515 if asOneFunction["Direct"] is not None:
516 __Function = asOneFunction
518 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
520 __Function = { "Direct": asOneFunction }
521 __Function.update({"useApproximatedDerivatives": True})
522 __Function.update(__Parameters)
# ... or a Direct/Tangent/Adjoint triple (Direct optional, defaulting to
# the Tangent when absent).
523 elif asThreeFunctions is not None:
524 if isinstance(asThreeFunctions, dict) and \
525 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
526 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
527 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
528 __Function = asThreeFunctions
529 elif isinstance(asThreeFunctions, dict) and \
530 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
531 __Function = asThreeFunctions
532 __Function.update({"useApproximatedDerivatives": True})
535 "The functions has to be given in a dictionnary which have either" + \
536 " 1 key (\"Direct\") or" + \
537 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
538 if "Direct" not in asThreeFunctions:
539 __Function["Direct"] = asThreeFunctions["Tangent"]
540 __Function.update(__Parameters)
# Normalize the optional operating point appliedInX to a dictionary.
544 if appliedInX is not None and isinstance(appliedInX, dict):
545 __appliedInX = appliedInX
546 elif appliedInX is not None:
547 __appliedInX = {"HXb": appliedInX}
551 if scheduledBy is not None:
552 self.__T = scheduledBy
# Build path A: approximated derivatives by finite differences (FDA)
# around the user Direct function; fill in the FDA defaults first.
554 if isinstance(__Function, dict) and \
555 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
556 ("Direct" in __Function) and (__Function["Direct"] is not None):
557 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False # noqa: E272,E701
558 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01 # noqa: E272,E701
559 if "withdX" not in __Function: __Function["withdX"] = None # noqa: E272,E701
560 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM # noqa: E272,E701
561 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC # noqa: E272,E701
562 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18 # noqa: E272,E701
563 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1 # noqa: E272,E701
564 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None # noqa: E272,E701
565 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF # noqa: E272,E701
# Local import to avoid a circular dependency at module load time
# (NOTE(review): presumed reason — confirm against the package layout).
566 from daCore import NumericObjects
567 FDA = NumericObjects.FDApproximation(
569 Function = __Function["Direct"],
570 centeredDF = __Function["CenteredFiniteDifference"],
571 increment = __Function["DifferentialIncrement"],
572 dX = __Function["withdX"],
573 extraArguments = self.__extraArgs,
574 reducingMemoryUse = __Function["withReducingMemoryUse"],
575 avoidingRedundancy = __Function["withAvoidingRedundancy"],
576 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
577 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
578 mpEnabled = __Function["EnableParallelDerivatives"],
579 mpWorkers = __Function["NumberOfProcesses"],
580 mfEnabled = __Function["withmfEnabled"],
# Wrap the FDA Direct/Tangent/Adjoint methods as Operator objects.
582 self.__FO["Direct"] = Operator(
584 fromMethod = FDA.DirectOperator,
585 reducingMemoryUse = __reduceM,
586 avoidingRedundancy = __avoidRC,
587 inputAsMultiFunction = inputAsMF,
588 extraArguments = self.__extraArgs,
589 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
590 self.__FO["Tangent"] = Operator(
591 name = self.__name + "Tangent",
592 fromMethod = FDA.TangentOperator,
593 reducingMemoryUse = __reduceM,
594 avoidingRedundancy = __avoidRC,
595 inputAsMultiFunction = inputAsMF,
596 extraArguments = self.__extraArgs )
597 self.__FO["Adjoint"] = Operator(
598 name = self.__name + "Adjoint",
599 fromMethod = FDA.AdjointOperator,
600 reducingMemoryUse = __reduceM,
601 avoidingRedundancy = __avoidRC,
602 inputAsMultiFunction = inputAsMF,
603 extraArguments = self.__extraArgs )
604 self.__FO["DifferentialIncrement"] = __Function["DifferentialIncrement"]
# Build path B: the user supplied all three operator functions directly.
605 elif isinstance(__Function, dict) and \
606 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
607 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
608 self.__FO["Direct"] = Operator(
610 fromMethod = __Function["Direct"],
611 reducingMemoryUse = __reduceM,
612 avoidingRedundancy = __avoidRC,
613 inputAsMultiFunction = inputAsMF,
614 extraArguments = self.__extraArgs,
615 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
616 self.__FO["Tangent"] = Operator(
617 name = self.__name + "Tangent",
618 fromMethod = __Function["Tangent"],
619 reducingMemoryUse = __reduceM,
620 avoidingRedundancy = __avoidRC,
621 inputAsMultiFunction = inputAsMF,
622 extraArguments = self.__extraArgs )
623 self.__FO["Adjoint"] = Operator(
624 name = self.__name + "Adjoint",
625 fromMethod = __Function["Adjoint"],
626 reducingMemoryUse = __reduceM,
627 avoidingRedundancy = __avoidRC,
628 inputAsMultiFunction = inputAsMF,
629 extraArguments = self.__extraArgs )
630 self.__FO["DifferentialIncrement"] = None
# Build path C: linear operator given as a matrix — Tangent is the matrix
# itself and Adjoint its transpose.
631 elif asMatrix is not None:
632 if isinstance(__Matrix, str):
633 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
634 __matrice = numpy.asarray( __Matrix, dtype=float )
635 self.__FO["Direct"] = Operator(
637 fromMatrix = __matrice,
638 reducingMemoryUse = __reduceM,
639 avoidingRedundancy = __avoidRC,
640 inputAsMultiFunction = inputAsMF,
641 enableMultiProcess = __Parameters["EnableParallelEvaluations"] )
642 self.__FO["Tangent"] = Operator(
643 name = self.__name + "Tangent",
644 fromMatrix = __matrice,
645 reducingMemoryUse = __reduceM,
646 avoidingRedundancy = __avoidRC,
647 inputAsMultiFunction = inputAsMF )
648 self.__FO["Adjoint"] = Operator(
649 name = self.__name + "Adjoint",
650 fromMatrix = __matrice.T,
651 reducingMemoryUse = __reduceM,
652 avoidingRedundancy = __avoidRC,
653 inputAsMultiFunction = inputAsMF )
655 self.__FO["DifferentialIncrement"] = None
658 "The %s object is improperly defined or undefined,"%self.__name + \
659 " it requires at minima either a matrix, a Direct operator for" + \
660 " approximate derivatives or a Tangent/Adjoint operators pair." + \
661 " Please check your operator input.")
# Store the optional operating-point values as column vectors.
663 if __appliedInX is not None:
664 self.__FO["AppliedInX"] = {}
665 for key in __appliedInX:
666 if isinstance(__appliedInX[key], str):
667 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
668 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1, 1))
670 self.__FO["AppliedInX"] = None
# Returns the per-operator evaluation counters; 'whot' selects the
# operator kind and 'which' one counter entry.
675 def nbcalls(self, whot=None, which=None):
677 Renvoie les nombres d'évaluations de l'opérateur
680 for otype in ["Direct", "Tangent", "Adjoint"]:
681 if otype in self.__FO:
682 __nbcalls[otype] = self.__FO[otype].nbcalls()
683 if whot in __nbcalls and which is not None:
684 return __nbcalls[whot][which]
689 "x.__repr__() <==> repr(x)"
690 return repr(self.__FO)
693 "x.__str__() <==> str(x)"
694 return str(self.__FO)
# Copies a deprecated option to its new key and warns the user; returns
# the updated collection.
696 def __deprecateOpt(self, collection: dict, oldn: str, newn: str):
697 if oldn in collection:
698 collection[newn] = collection[oldn]
700 __msg = "the parameter \"%s\" used in this case is"%(oldn,)
701 __msg += " deprecated and has to be replaced by \"%s\"."%(newn,)
702 __msg += " Please update your code."
703 warnings.warn(__msg, FutureWarning, stacklevel=50)
706 # ==============================================================================
707 class Algorithm(object):
709 Classe générale d'interface de type algorithme
711 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
712 d'assimilation, en fournissant un container (dictionnaire) de variables
713 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
715 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
718 "_name", "_parameters", "__internal_state", "__required_parameters",
719 "_m", "__variable_names_not_public", "__canonical_parameter_name",
720 "__canonical_stored_name", "__replace_by_the_new_name",
724 def __init__(self, name):
726 L'initialisation présente permet de fabriquer des variables de stockage
727 disponibles de manière générique dans les algorithmes élémentaires. Ces
728 variables de stockage sont ensuite conservées dans un dictionnaire
729 interne à l'objet, mais auquel on accède par la méthode "get".
731 Les variables prévues sont :
732 - APosterioriCorrelations : matrice de corrélations de la matrice A
733 - APosterioriCovariance : matrice de covariances a posteriori : A
734 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
735 - APosterioriVariances : vecteur des variances de la matrice A
736 - Analysis : vecteur d'analyse : Xa
737 - BMA : Background moins Analysis : Xa - Xb
738 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
739 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
740 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
741 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
742 - CostFunctionJo : partie observations de la fonction-coût : Jo
743 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
744 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
745 - CurrentOptimum : état optimal courant lors d'itérations
746 - CurrentState : état courant lors d'itérations
747 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
748 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
749 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
750 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
751 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
752 - ForecastState : état prédit courant lors d'itérations
753 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
754 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
755 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
756 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
757 - Innovation : l'innovation : d = Y - H(X)
758 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
759 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
760 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
761 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
762 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
763 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
764 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
765 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
766 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
767 - KalmanGainAtOptimum : gain de Kalman à l'optimum
768 - MahalanobisConsistency : indicateur de consistance des covariances
769 - OMA : Observation moins Analyse : Y - Xa
770 - OMB : Observation moins Background : Y - Xb
771 - ReducedCoordinates : coordonnées dans la base réduite
772 - Residu : dans le cas des algorithmes de vérification
773 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
774 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
775 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
776 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
777 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
778 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
779 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
780 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
781 - SingularValues : valeurs singulières provenant d'une décomposition SVD
782 On peut rajouter des variables à stocker dans l'initialisation de
783 l'algorithme élémentaire qui va hériter de cette classe
785 logging.debug("%s Initialisation", str(name))
786 self._m = PlatformInfo.SystemUsage()
788 self._name = str( name )
789 self._parameters = {"StoreSupplementaryCalculations": []}
790 self.__internal_state = {}
791 self.__required_parameters = {}
792 self.__required_inputs = {
793 "RequiredInputValues": {"mandatory": (), "optional": ()},
794 "AttributesTags": [],
795 "AttributesFeatures": [],
797 self.__variable_names_not_public = {"nextStep": False} # Duplication dans AlgorithmAndParameters
798 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
799 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
800 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
802 self.StoredVariables = {}
803 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
804 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
805 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
806 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
807 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
808 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
809 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
810 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
811 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
812 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
813 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
814 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
815 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
816 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
817 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
818 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
819 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
820 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrice(name = "EnsembleOfSimulations")
821 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrice(name = "EnsembleOfSnapshots")
822 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrice(name = "EnsembleOfStates")
823 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
824 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
825 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
826 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
827 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
828 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
829 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
830 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
831 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
832 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
833 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
834 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
835 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
836 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
837 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
838 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
839 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
840 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
841 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
842 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
843 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
844 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
845 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
846 self.StoredVariables["ReducedBasisMus"] = Persistence.OneVector(name = "ReducedBasisMus")
847 self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
848 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
849 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
850 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
851 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
852 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
853 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
854 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
855 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
856 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
857 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
858 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
859 self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
861 for k in self.StoredVariables:
862 self.__canonical_stored_name[k.lower()] = k
864 for k, v in self.__variable_names_not_public.items():
865 self.__canonical_parameter_name[k.lower()] = k
866 self.__canonical_parameter_name["algorithm"] = "Algorithm"
867 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        "Pre-run operations: input checks, parameter normalization and logging"
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        #
        # Update of the internal parameters with the content of Parameters,
        # taking the default values for all those left undefined
        self.__setParameters(Parameters, reset=True)  # Copy
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters:
                self.__setParameters( {k: v} )

        def __test_vvalue(argument, variable, argname, symbol=None):
            "Checks and logs a vector input, raising if a mandatory one is missing"
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name, argname, symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name, argname, symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                        "%s %s vector %s is required and set, and its full size is %i." \
                        % (self._name, argname, symbol, numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                        "%s %s vector %s is optional and set, and its full size is %i." \
                        % (self._name, argname, symbol, numpy.array(argument).size))
                        "%s %s vector %s is set although neither required nor optional, and its full size is %i." \
                        % (self._name, argname, symbol, numpy.array(argument).size))

        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )

        def __test_cvalue(argument, variable, argname, symbol=None):
            "Checks and logs a covariance matrix input, raising if a mandatory one is missing"
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name, argname, symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name, argname, symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name, argname, symbol))
                        "%s %s error covariance matrix %s is set although neither required nor optional." \
                        % (self._name, argname, symbol))

        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )

        def __test_ovalue(argument, variable, argname, symbol=None):
            "Checks and logs an operator input, raising if a mandatory one is missing"
            if argument is None or (isinstance(argument, dict) and len(argument) == 0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name, argname, symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name, argname, symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name, argname, symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name, argname, symbol))
                    logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name, argname, symbol))

        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        #
        # Checks and completes the bounds
        if ("Bounds" in self._parameters) \
                and isinstance(self._parameters["Bounds"], (list, tuple)):
            if (len(self._parameters["Bounds"]) > 0):
                logging.debug("%s Bounds taken into account"%(self._name,))
                self._parameters["Bounds"] = None
        elif ("Bounds" in self._parameters) \
                and isinstance(self._parameters["Bounds"], (numpy.ndarray, numpy.matrix)):
            # Normalize array-like bounds to a list of [min, max] pairs
            self._parameters["Bounds"] = numpy.ravel(self._parameters["Bounds"]).reshape((-1, 2)).tolist()
            if (len(self._parameters["Bounds"]) > 0):
                logging.debug("%s Bounds for states taken into account"%(self._name,))
                self._parameters["Bounds"] = None
            self._parameters["Bounds"] = None
        if self._parameters["Bounds"] is None:
            logging.debug("%s There are no bounds for states to take into account"%(self._name,))

        if ("StateBoundsForQuantiles" in self._parameters) \
                and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
                and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
        elif ("StateBoundsForQuantiles" in self._parameters) \
                and isinstance(self._parameters["StateBoundsForQuantiles"], (numpy.ndarray, numpy.matrix)):
            self._parameters["StateBoundsForQuantiles"] = numpy.ravel(self._parameters["StateBoundsForQuantiles"]).reshape((-1, 2)).tolist()
            if (len(self._parameters["StateBoundsForQuantiles"]) > 0):
                logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
        # Warning: unlike "Bounds", there is no default to None here,
        # otherwise being without bounds would be impossible
        #
        # Checks and completes the initialization in X
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"], 'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                        "Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        % (self._parameters["InitializationPoint"].size, numpy.ravel(Xb).size))
                # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
                self._parameters["InitializationPoint"] = numpy.ravel(Xb)
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")
        #
        # Workaround for a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables", True)
        #
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    def _post_run(self, _oH=None, _oM=None):
        "Post-run operations: a posteriori covariance by-products and final logging"
        if ("StoreSupplementaryCalculations" in self._parameters) and \
                "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Scale the covariance by the inverse standard deviations to get correlations
                    _EI = numpy.diag(1. / numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # Log the number of direct/tangent/adjoint operator calls and cache hits
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
                "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(0), _oH["Tangent"].nbcalls(0), _oH["Adjoint"].nbcalls(0))
                "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(3), _oH["Tangent"].nbcalls(3), _oH["Adjoint"].nbcalls(3))
        if _oM is not None and "Direct" in _oM and "Tangent" in _oM and "Adjoint" in _oM:
                "%s Nombre d'évaluation(s) de l'opérateur d'évolution direct/tangent/adjoint.: %i/%i/%i",
                self._name, _oM["Direct"].nbcalls(0), _oM["Tangent"].nbcalls(0), _oM["Adjoint"].nbcalls(0))
                "%s Nombre d'appels au cache d'opérateur d'évolution direct/tangent/adjoint..: %i/%i/%i",
                self._name, _oM["Direct"].nbcalls(3), _oM["Tangent"].nbcalls(3), _oM["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
1051 def _toStore(self, key):
1052 "True if in StoreSupplementaryCalculations, else False"
1053 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one of the stored variables identified by its key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Lookup is case-insensitive through the canonical stored-name map
        if key is None or key.lower() not in self.__canonical_stored_name:
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
1076 "D.keys() -> list of D's keys"
1077 if hasattr(self, "StoredVariables"):
1078 return self.StoredVariables.keys()
    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        # Stored variable names are matched case-insensitively
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary algorithmic calculation operation
        (abstract: concrete algorithms override this method).
        """
        raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
    def defineRequiredParameter(
        """
        Allows the definition, in the algorithm, of required parameters and
        their default characteristics (default value, typecast, bounds,
        allowed values, message and possible deprecated name).
        """
            raise ValueError("A name is mandatory to define a required parameter.")

        self.__required_parameters[name] = {
            "default" : default, # noqa: E203
            "typecast" : typecast, # noqa: E203
            "minval" : minval, # noqa: E203
            "maxval" : maxval, # noqa: E203
            "listval" : listval, # noqa: E203
            "listadv" : listadv, # noqa: E203
            "message" : message, # noqa: E203
            "oldname" : oldname, # noqa: E203
        self.__canonical_parameter_name[name.lower()] = name
        if oldname is not None:
            # Keep accepting the deprecated name, mapped to the new one
            self.__canonical_parameter_name[oldname.lower()] = name  # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or directly the
        dictionary of required parameters when details are requested.
        """
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way:
        typecast, bounds checking and allowed-values checking.
        """
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        # Typecast the given value, or the default one when no value is given
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None:
                __val = typecast( default )
            if typecast is None:
                __val = typecast( value )
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        # Bounds checking on the typecast value (element-wise on array-likes)
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        # Allowed-values checking, element-wise for list/tuple values
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val, list) or isinstance(__val, tuple):
                if listval is not None and v in listval:
                elif listadv is not None and v in listadv:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(__val, __k, listval))
        if __k in ["SetSeed",]:
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Register the mandatory and optional input arguments of the computation.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    def getInputArguments(self):
        """
        Return the (mandatory, optional) lists of required input arguments.
        """
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    def setAttributes(self, tags=(), features=()):
        """
        Append classification tags and features attributes.
        Return the current lists in all cases.
        """
        self.__required_inputs["AttributesTags"].extend( tags )
        self.__required_inputs["AttributesFeatures"].extend( features )
        return (self.__required_inputs["AttributesTags"], self.__required_inputs["AttributesFeatures"])
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters in the internal dictionary.
        """
        # NOTE(review): `fromDico={}` is a mutable default argument; it is only
        # read here, but confirm it is never mutated before relying on it.
        self._parameters.update( fromDico )
        # Map canonical parameter names back to the keys actually given
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        # __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()

        # Warn about deprecated parameter names
        for k in __inverse_fromDico_keys.values():
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k, self._name, __newk)
                __msg += " Please update your code."
                warnings.warn(__msg, FutureWarning, stacklevel=50)

        # Validate and store every required parameter, given or defaulted
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k, fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            if hasattr(self._parameters[k], "size") and self._parameters[k].size > 100:
                logging.debug("%s %s d'une taille totale de %s", self._name, self.__required_parameters[k]["message"], self._parameters[k].size)
            elif hasattr(self._parameters[k], "__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
        """
        Store named variables composing the internal state.
        """
        # NOTE(review): `fromDico={}` is a mutable default argument; it is only
        # copied via dict(fromDico) below, so it is not mutated here.
        if reset:  # Empty the dictionary beforehand
            self.__internal_state = {}
        if key is not None and value is not None:
            self.__internal_state[key] = value
        self.__internal_state.update( dict(fromDico) )
    def _getInternalState(self, key=None):
        """
        Return an internal state value by key, or the whole dictionary of
        named internal state variables when no key matches.
        """
        if key is not None and key in self.__internal_state:
            return self.__internal_state[key]
            return self.__internal_state
    def _getTimeState(self, reset=False):
        """
        Initialize (reset=True) or return the computing time (cpu, elapsed)
        in seconds since the last reset.
        """
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        c, e = self._getTimeState()
        # Compare elapsed cpu/wall-clock time against the optional limits
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
1297 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class to mimic "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to those of the "Algorithm" class.
    """
        "_name", "_parameters", "StoredVariables", "__canonical_stored_name",

    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations": []}

        # Minimal set of stored variables, as in Algorithm
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")

        # Case-insensitive access map to the stored variables
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one of the stored variables identified by its key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
1341 # ==============================================================================
1342 class AlgorithmAndParameters(object):
1344 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1347 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1348 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1349 "__Q", "__variable_names_not_public",
1353 name = "GenericAlgorithm",
1359 self.__name = str(name)
1363 self.__algorithm = {}
1364 self.__algorithmFile = None
1365 self.__algorithmName = None
1367 self.updateParameters( asDict, asScript )
1369 if asAlgorithm is None and asScript is not None:
1370 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1372 __Algo = asAlgorithm
1374 if __Algo is not None:
1375 self.__A = str(__Algo)
1376 self.__P.update( {"Algorithm": self.__A} )
1378 self.__setAlgorithm( self.__A )
1380 self.__variable_names_not_public = {"nextStep": False} # Duplication dans Algorithm
    def updateParameters(self, asDict = None, asScript = None ):
        "Parameters update, from a dictionary or from a script file"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Launch the assimilation computation from a dictionary of AO objects"
        Operator.CM.clearCache()

        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap every input through getO() when available, raw value otherwise
        if hasattr(asDictAO["Background"], "getO"):        self.__Xb = asDictAO["Background"].getO()          # noqa: E241,E701
        elif hasattr(asDictAO["CheckingPoint"], "getO"):   self.__Xb = asDictAO["CheckingPoint"].getO()       # noqa: E241,E701
        else:                                              self.__Xb = None                                   # noqa: E241,E701
        if hasattr(asDictAO["Observation"], "getO"):       self.__Y  = asDictAO["Observation"].getO()         # noqa: E241,E701
        else:                                              self.__Y  = asDictAO["Observation"]                # noqa: E241,E701
        if hasattr(asDictAO["ControlInput"], "getO"):      self.__U  = asDictAO["ControlInput"].getO()        # noqa: E241,E701
        else:                                              self.__U  = asDictAO["ControlInput"]               # noqa: E241,E701
        if hasattr(asDictAO["ObservationOperator"], "getO"): self.__HO = asDictAO["ObservationOperator"].getO()  # noqa: E241,E701
        else:                                              self.__HO = asDictAO["ObservationOperator"]        # noqa: E241,E701
        if hasattr(asDictAO["EvolutionModel"], "getO"):    self.__EM = asDictAO["EvolutionModel"].getO()      # noqa: E241,E701
        else:                                              self.__EM = asDictAO["EvolutionModel"]             # noqa: E241,E701
        if hasattr(asDictAO["ControlModel"], "getO"):      self.__CM = asDictAO["ControlModel"].getO()        # noqa: E241,E701
        else:                                              self.__CM = asDictAO["ControlModel"]               # noqa: E241,E701
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]

        self.__shape_validate()

        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Launch the assimilation computation through a YACS XML scheme file"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
                not PlatformInfo.has_yacs or \
                not PlatformInfo.has_adao:
                "Unable to get SALOME, YACS or ADAO environnement variables.\n" + \
                "Please load the right environnement before trying to use it.\n" )

        import SALOMERuntime

        SALOMERuntime.RuntimeSALOME_setRuntime()

        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)

            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))

        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Consistency check of the scheme before execution
        info = pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())

        e = pilot.ExecutorSwig()

        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Checks the existence of a variable or parameter key and returns it"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # Without a matching key: all parameters except the non-public ones
            allvariables = self.__P
            for k in self.__variable_names_not_public:
                allvariables.pop(k, None)
1499 def pop(self, k, d):
1500 "Necessaire pour le pickling"
1501 return self.__algorithm.pop(k, d)
1503 def getAlgorithmRequiredParameters(self, noDetails=True):
1504 "Renvoie la liste des paramètres requis selon l'algorithme"
1505 return self.__algorithm.getRequiredParameters(noDetails)
1507 def getAlgorithmInputArguments(self):
1508 "Renvoie la liste des entrées requises selon l'algorithme"
1509 return self.__algorithm.getInputArguments()
1511 def getAlgorithmAttributes(self):
1512 "Renvoie la liste des attributs selon l'algorithme"
1513 return self.__algorithm.setAttributes()
1515 def setObserver(self, __V, __O, __I, __S):
1516 if self.__algorithm is None \
1517 or isinstance(self.__algorithm, dict) \
1518 or not hasattr(self.__algorithm, "StoredVariables"):
1519 raise ValueError("No observer can be build before choosing an algorithm.")
1520 if __V not in self.__algorithm:
1521 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1523 self.__algorithm.StoredVariables[ __V ].setDataObserver( Scheduler = __S, HookFunction = __O, HookParameters = __I )
1525 def removeObserver(self, __V, __O, __A = False):
1526 if self.__algorithm is None \
1527 or isinstance(self.__algorithm, dict) \
1528 or not hasattr(self.__algorithm, "StoredVariables"):
1529 raise ValueError("No observer can be removed before choosing an algorithm.")
1530 if __V not in self.__algorithm:
1531 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1533 return self.__algorithm.StoredVariables[ __V ].removeDataObserver( HookFunction = __O, AllObservers = __A )
    def hasObserver(self, __V):
        "Return whether the stored variable named __V has a data observer"
        if self.__algorithm is None \
                or isinstance(self.__algorithm, dict) \
                or not hasattr(self.__algorithm, "StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1545 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1546 for k in self.__variable_names_not_public:
1547 if k in __allvariables:
1548 __allvariables.remove(k)
1549 return __allvariables
1551 def __contains__(self, key=None):
1552 "D.__contains__(k) -> True if D has a key k, else False"
1553 return key in self.__algorithm or key in self.__P
1556 "x.__repr__() <==> repr(x)"
1557 return repr(self.__A) + ", " + repr(self.__P)
1560 "x.__str__() <==> str(x)"
1561 return str(self.__A) + ", " + str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to use to carry out the assimilation study. The
        argument is a character string referring to the name of an algorithm
        performing the operation on the fixed arguments.
        """
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete module file
        # ----------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice) + '.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
                "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
            sys_path_tmp = sys.path
            sys.path.insert(0, module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp
        except ImportError as e:
                "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice, e))
        #
        # Instantiate an object of the file's elementary type
        # ---------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        Validation of the correct correspondence of the sizes of the
        variables and of the matrices, if there are any.
        # NOTE(review): this extract elides some source lines (mostly
        # "raise ValueError(" openers and "else:" branches); orphan string
        # lines below are arguments of elided raise statements — confirm
        # against the upstream source.
        #
        # Each quantity is reduced to a shape tuple, accepting objects that
        # expose either a "size", a "shape" attribute, or a "shape()" method.
        if self.__Xb is None: __Xb_shape = (0,)  # noqa: E241,E701
        elif hasattr(self.__Xb, "size"): __Xb_shape = (self.__Xb.size,)  # noqa: E241,E701
        elif hasattr(self.__Xb, "shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape  # noqa: E241,E701
            else: __Xb_shape = self.__Xb.shape()  # noqa: E241,E701
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")  # noqa: E701
        if self.__Y is None: __Y_shape = (0,)  # noqa: E241,E701
        elif hasattr(self.__Y, "size"): __Y_shape = (self.__Y.size,)  # noqa: E241,E701
        elif hasattr(self.__Y, "shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape  # noqa: E241,E701
            else: __Y_shape = self.__Y.shape()  # noqa: E241,E701
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")  # noqa: E701
        if self.__U is None: __U_shape = (0,)  # noqa: E241,E701
        elif hasattr(self.__U, "size"): __U_shape = (self.__U.size,)  # noqa: E241,E701
        elif hasattr(self.__U, "shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape  # noqa: E241,E701
            else: __U_shape = self.__U.shape()  # noqa: E241,E701
        else: raise TypeError("The control (U) has no attribute of shape: problem !")  # noqa: E701
        if self.__B is None: __B_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__B, "shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape  # noqa: E241,E701
            else: __B_shape = self.__B.shape()  # noqa: E241,E701
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")  # noqa: E701
        if self.__R is None: __R_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__R, "shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape  # noqa: E241,E701
            else: __R_shape = self.__R.shape()  # noqa: E241,E701
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")  # noqa: E701
        if self.__Q is None: __Q_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__Q, "shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape  # noqa: E241,E701
            else: __Q_shape = self.__Q.shape()  # noqa: E241,E701
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")  # noqa: E701
        # Operators are stored as dicts of callables; an empty or plain-dict
        # operator degenerates to a (0, 0) shape and is skipped by the checks.
        if len(self.__HO) == 0: __HO_shape = (0, 0)  # noqa: E241,E701
        elif isinstance(self.__HO, dict): __HO_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__HO["Direct"], "shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape  # noqa: E241,E701
            else: __HO_shape = self.__HO["Direct"].shape()  # noqa: E241,E701
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")  # noqa: E701
        if len(self.__EM) == 0: __EM_shape = (0, 0)  # noqa: E241,E701
        elif isinstance(self.__EM, dict): __EM_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__EM["Direct"], "shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape  # noqa: E241,E701
            else: __EM_shape = self.__EM["Direct"].shape()  # noqa: E241,E701
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")  # noqa: E241,E701
        if len(self.__CM) == 0: __CM_shape = (0, 0)  # noqa: E241,E701
        elif isinstance(self.__CM, dict): __CM_shape = (0, 0)  # noqa: E241,E701
        elif hasattr(self.__CM["Direct"], "shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape  # noqa: E241,E701
            else: __CM_shape = self.__CM["Direct"].shape()  # noqa: E241,E701
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")  # noqa: E701
        # Checking the conditions
        # ---------------------------
        # Vectors must be flat (1-D, or a degenerate 2-D row/column).
        if not ( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not ( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        # Covariance matrices must be square.
        if not ( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not ( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not ( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not ( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        # Cross-consistency between operators, vectors and covariances.
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[1] == max(__Xb_shape) ):
                "Shape characteristic of observation operator (H)" + \
                " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape, __Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not ( __HO_shape[0] == max(__Y_shape) ):
                "Shape characteristic of observation operator (H)" + \
                " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape, __Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not ( __HO_shape[1] == __B_shape[0] ):
                "Shape characteristic of observation operator (H)" + \
                " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape, __B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not ( __HO_shape[0] == __R_shape[1] ):
                "Shape characteristic of observation operator (H)" + \
                " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape, __R_shape))
        if self.__B is not None and len(self.__B) > 0 and not ( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as a
            # series of members; it is repacked into a persistent vector.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1, min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background")
                for member in asPersistentVector:
                    self.__Xb.store( numpy.asarray(member, dtype=float) )
                __Xb_shape = min(__B_shape)
                "Shape characteristic of a priori errors covariance matrix (B)" + \
                " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape, __Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not ( __R_shape[1] == max(__Y_shape) ):
                "Shape characteristic of observation errors covariance matrix (R)" + \
                " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape, __Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not ( __EM_shape[1] == max(__Xb_shape) ):
                "Shape characteristic of evolution model (EM)" + \
                " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape, __Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not ( __CM_shape[1] == max(__U_shape) ):
                "Shape characteristic of control model (CM)" + \
                " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape, __U_shape))
        # "Bounds"-type parameters must pair one (min, max) per state component;
        # an empty specification is normalized to None rather than rejected.
        if ("Bounds" in self.__P) \
                and isinstance(self.__P["Bounds"], (list, tuple)) \
                and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            if len(self.__P["Bounds"]) > 0:
                raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
                                 % (len(self.__P["Bounds"]), max(__Xb_shape)))
                self.__P["Bounds"] = None
        if ("Bounds" in self.__P) \
                and isinstance(self.__P["Bounds"], (numpy.ndarray, numpy.matrix)) \
                and (self.__P["Bounds"].shape[0] != max(__Xb_shape)):
            if self.__P["Bounds"].size > 0:
                raise ValueError("The number '%s' of bound pairs for the state components is different from the size '%s' of the state (X) itself." \
                                 % (self.__P["Bounds"].shape[0], max(__Xb_shape)))
                self.__P["Bounds"] = None
        if ("BoxBounds" in self.__P) \
                and isinstance(self.__P["BoxBounds"], (list, tuple)) \
                and (len(self.__P["BoxBounds"]) != max(__Xb_shape)):
            raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
                             % (len(self.__P["BoxBounds"]), max(__Xb_shape)))
        if ("BoxBounds" in self.__P) \
                and isinstance(self.__P["BoxBounds"], (numpy.ndarray, numpy.matrix)) \
                and (self.__P["BoxBounds"].shape[0] != max(__Xb_shape)):
            raise ValueError("The number '%s' of bound pairs for the state box components is different from the size '%s' of the state (X) itself." \
                             % (self.__P["BoxBounds"].shape[0], max(__Xb_shape)))
        if ("StateBoundsForQuantiles" in self.__P) \
                and isinstance(self.__P["StateBoundsForQuantiles"], (list, tuple)) \
                and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number '%s' of bound pairs for the quantile state components is different from the size '%s' of the state (X) itself." \
                             % (len(self.__P["StateBoundsForQuantiles"]), max(__Xb_shape)))
1766 # ==============================================================================
class RegulationAndParameters(object):
    General interface class for an action on regulation and its parameters.
    __slots__ = ("__name", "__P")

    # NOTE(review): the "def __init__(self," header and some "else:" lines
    # are elided in this extract — confirm against the upstream source.
                 name = "GenericRegulation",
        self.__name = str(name)

        # Either the algorithm is given directly, or it is read from a script.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm

        # Same for the parameter dictionary.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )

        if __Dict is not None:
            self.__P.update( dict(__Dict) )

        if __Algo is not None:
            self.__P.update( {"Algorithm": str(__Algo)} )

    def get(self, key = None):
        "Checks the existence of a variable or parameter key"
        # Returns the stored value for the requested key.
            return self.__P[key]
1806 # ==============================================================================
class DataObserver(object):
    General interface class of observer type.
    __slots__ = ("__name", "__V", "__O", "__I")

    # NOTE(review): the "def __init__(self," header and several "else:" /
    # "raise" opener lines are elided in this extract — confirm upstream.
                 name = "GenericObserver",
        self.__name = str(name)
        # Normalize the observed variable name(s) into the tuple self.__V and
        # the matching per-variable info strings into self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # Same info string repeated for every observed variable.
                self.__I = (str(withInfo),) * len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
                self.__I = (str(withInfo),)
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # The observer callable is either given directly, or built from a
        # template/string/script through UserScript + Observer2Func.
        if asObsObject is not None:
            self.__O = asObsObject
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        # Register the observer on each named variable of the algorithm.
        for k in range(len(self.__V)):
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        "x.__repr__() <==> repr(x)"
        return repr(self.__V) + "\n" + repr(self.__O)
        "x.__str__() <==> str(x)"
        return str(self.__V) + "\n" + str(self.__O)
1870 # ==============================================================================
class UserScript(object):
    General interface class for user-supplied script text.
    __slots__ = ("__name", "__F")

    # NOTE(review): the "def __init__(self," header and the body of the
    # first "if asString" branch are elided in this extract — confirm upstream.
                 name = "GenericUserScript",
        self.__name = str(name)
        # Resolution order for the script text self.__F: explicit string,
        # then a named template matching the script role, then a script file.
        if asString is not None:
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()

        "x.__repr__() <==> repr(x)"
        return repr(self.__F)
        "x.__str__() <==> str(x)"
        return str(self.__F)
1905 # ==============================================================================
class ExternalParameters(object):
    General interface class for the storage of external parameters.
    __slots__ = ("__name", "__P")

    # NOTE(review): some "def" headers and "else:" lines are elided in this
    # extract — confirm against the upstream source.
                 name = "GenericExternalParameters",
        self.__name = str(name)

        self.updateParameters( asDict, asScript )

    def updateParameters(self, asDict = None, asScript = None ):
        "Parameter update"
        # Either the dictionary is given directly, or it is read from a script.
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )

        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
            # Known key: its value; otherwise the list of available keys.
            return self.__P[key]
            return list(self.__P.keys())

        return list(self.__P.keys())

    def pop(self, k, d):
        # Delegate removal-with-default to the underlying dict.
        return self.__P.pop(k, d)

        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1952 # ==============================================================================
class State(object):
    General interface class of state (vector or vector series) type.
        "__name", "__check", "__V", "__T", "__is_vector", "__is_series",

    # NOTE(review): the "__slots__ = (" opener, the "def __init__(self,"
    # header and several "else:" branches are elided in this extract —
    # confirm against the upstream source.
                 name = "GenericVector",
                 asPersistentVector = None,
                 toBeChecked = False ):
        Allows the definition of a vector:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" if input comes from a script.
        - asPersistentVector : data input, as a series of vectors compatible
          with the numpy.matrix constructor, or as an object of Persistence
          type, or "True" if input comes from a script.
        - asScript : if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (by default) or
          "asPersistentVector" depending on which of these flags is set.
        - asDataFile : if one or more valid files are given containing
          values in columns, themselves named "colNames" (if no column name
          is given, a column named "name" is searched for), the columns are
          retrieved and arranged row after row (colMajor=False, default) or
          column after column (colMajor=True). The resulting variable is of
          type "asVector" (by default) or "asPersistentVector" depending on
          which of these flags is set.
        self.__name = str(name)
        self.__check = bool(toBeChecked)

        self.__is_vector = False
        self.__is_series = False

        # Acquisition: script, data file, or direct values, in that priority.
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files are already stored transposed with respect to the
                # column-major convention, hence the asymmetric transpose.
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
            __Vector, __Series = asVector, asPersistentVector

        # Storage: a single column vector, or a persistent series of vectors.
        if __Vector is not None:
            self.__is_vector = True
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1, 1))
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name)
                if isinstance(__Series, str):
                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                for member in __Series:
                    if isinstance(member, str):
                        member = PlatformInfo.strvect2liststr( member )
                    self.__V.store(numpy.asarray( member, dtype=float ))
            # "shape" may be an attribute or a method on the stored object.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0], 1)
            self.size = self.shape[0] * self.shape[1]
                "The %s object is improperly defined or undefined,"%self.__name + \
                " it requires at minima either a vector, a list/tuple of" + \
                " vectors or a persistent object. Please check your vector input.")

        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
            return self.__V, self.__T
        elif self.__T is None:

        "Internal type check"
        return self.__is_vector

        "Internal type check"
        return self.__is_series

        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

        "x.__str__() <==> str(x)"
        return str(self.__V)
2088 # ==============================================================================
2089 class Covariance(object):
2091 Classe générale d'interface de type covariance
2094 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
2095 "__is_object", "shape", "size",
    # NOTE(review): the "def __init__(self," header and several branch lines
    # ("if asEyeByScalar:", "else:", a final raise and the __validate call)
    # are elided in this extract — confirm against the upstream source.
                 name = "GenericCovariance",
                 asCovariance = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked = False ):
        Allows the definition of a covariance:
        - asCovariance : data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar, multiplying
          an identity correlation matrix, so no matrix has to be explicitly
          given
        - asEyeByVector : data input as a single variance vector, to be put
          on the diagonal of a correlation matrix, so no matrix has to be
          explicitly given
        - asCovObject : data input as a python object, which has the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked : boolean indicating whether the SDP character of the
          full matrix has to be checked
        self.__name = str(name)
        self.__check = bool(toBeChecked)

        # Exactly one internal representation flag will be set below.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False

        # Acquisition: from a script, or directly from the arguments.
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject

        # Storage, by decreasing priority: scalar, vector, matrix, object.
        if __Scalar is not None:
            if isinstance(__Scalar, str):
                __Scalar = PlatformInfo.strvect2liststr( __Scalar )
                if len(__Scalar) > 0:
                    __Scalar = __Scalar[0]
            if numpy.array(__Scalar).size != 1:
                " The diagonal multiplier given to define a sparse matrix is" + \
                " not a unique scalar value.\n Its actual measured size is" + \
                " %i. Please check your scalar input."%numpy.array(__Scalar).size)
            self.__is_scalar = True
            # abs() enforces a non-negative variance multiplier.
            self.__C = numpy.abs( float(__Scalar) )
        elif __Vector is not None:
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__is_vector = True
            self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
            self.shape = (self.__C.size, self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # The object must honour the minimal covariance protocol.
            for at in ("getT", "getI", "diag", "trace", "__add__", "__sub__", "__neg__", "__matmul__", "__mul__", "__rmatmul__", "__rmul__"):
                if not hasattr(self.__C, at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name, at))
            if hasattr(self.__C, "shape"):
                self.shape = self.__C.shape
            if hasattr(self.__C, "size"):
                self.size = self.__C.size
    def __validate(self):
        # Validates the stored covariance: set, square, and positive
        # (definite for the full-matrix/object cases).
        # NOTE(review): the "try:" / "except" lines around the Cholesky
        # checks are elided in this extract — confirm upstream.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name, self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name, self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name, self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # The (potentially expensive) SPD check runs only when requested or
        # when logging is verbose enough; Cholesky failure means not SPD.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
                numpy.linalg.cholesky( self.__C )
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2214 "Vérification du type interne"
2215 return self.__is_scalar
2218 "Vérification du type interne"
2219 return self.__is_vector
2222 "Vérification du type interne"
2223 return self.__is_matrix
2226 "Vérification du type interne"
2227 return self.__is_object
2232 return Covariance(self.__name + "I", asCovariance = numpy.linalg.inv(self.__C) )
2233 elif self.isvector():
2234 return Covariance(self.__name + "I", asEyeByVector = 1. / self.__C )
2235 elif self.isscalar():
2236 return Covariance(self.__name + "I", asEyeByScalar = 1. / self.__C )
2237 elif self.isobject() and hasattr(self.__C, "getI"):
2238 return Covariance(self.__name + "I", asCovObject = self.__C.getI() )
2240 return None # Indispensable
2245 return Covariance(self.__name + "T", asCovariance = self.__C.T )
2246 elif self.isvector():
2247 return Covariance(self.__name + "T", asEyeByVector = self.__C )
2248 elif self.isscalar():
2249 return Covariance(self.__name + "T", asEyeByScalar = self.__C )
2250 elif self.isobject() and hasattr(self.__C, "getT"):
2251 return Covariance(self.__name + "T", asCovObject = self.__C.getT() )
2253 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2256 "Décomposition de Cholesky"
2258 return Covariance(self.__name + "C", asCovariance = numpy.linalg.cholesky(self.__C) )
2259 elif self.isvector():
2260 return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
2261 elif self.isscalar():
2262 return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
2263 elif self.isobject() and hasattr(self.__C, "cholesky"):
2264 return Covariance(self.__name + "C", asCovObject = self.__C.cholesky() )
2266 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        # NOTE(review): the leading "if self.ismatrix():" and the final
        # "else:" lines appear elided in this extract — confirm upstream.
            return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
        elif self.isvector():
            # Diagonal case: element-wise inverse square root of variances.
            return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C, "choleskyI"):
            return Covariance(self.__name + "H", asCovObject = self.__C.choleskyI() )
            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2282 "Racine carrée matricielle"
2285 return Covariance(self.__name + "C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2286 elif self.isvector():
2287 return Covariance(self.__name + "C", asEyeByVector = numpy.sqrt( self.__C ) )
2288 elif self.isscalar():
2289 return Covariance(self.__name + "C", asEyeByScalar = numpy.sqrt( self.__C ) )
2290 elif self.isobject() and hasattr(self.__C, "sqrtm"):
2291 return Covariance(self.__name + "C", asCovObject = self.__C.sqrtm() )
2293 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2296 "Inversion de la racine carrée matricielle"
2299 return Covariance(self.__name + "H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2300 elif self.isvector():
2301 return Covariance(self.__name + "H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2302 elif self.isscalar():
2303 return Covariance(self.__name + "H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2304 elif self.isobject() and hasattr(self.__C, "sqrtmI"):
2305 return Covariance(self.__name + "H", asCovObject = self.__C.sqrtmI() )
2307 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" and scalar-branch
        # guard lines appear elided in this extract — confirm upstream.
            return numpy.diag(self.__C)
        elif self.isvector():
        elif self.isscalar():
                # Scalar case needs an explicit size to expand the diagonal.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * numpy.ones(int(msize))
        elif self.isobject() and hasattr(self.__C, "diag"):
            return self.__C.diag()
            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" and the scalar
        # guard lines appear elided in this extract — confirm upstream.
            return numpy.trace(self.__C)
        elif self.isvector():
            # Diagonal case: trace is the sum of the stored variances.
            return float(numpy.sum(self.__C))
        elif self.isscalar():
                # Scalar case needs an explicit size to know the multiplicity.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
    def asfullmatrix(self, msize=None):
        # Return the covariance expanded as a dense float ndarray.
        # NOTE(review): the leading "if self.ismatrix():" and the scalar
        # guard lines appear elided in this extract — confirm upstream.
            return numpy.asarray(self.__C, dtype=float)
        elif self.isvector():
            return numpy.asarray( numpy.diag(self.__C), dtype=float )
        elif self.isscalar():
                # Scalar case needs an explicit size to build the full matrix.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
        elif self.isobject() and hasattr(self.__C, "asfullmatrix"):
            return self.__C.asfullmatrix()
            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2357 def assparsematrix(self):
2365 "x.__repr__() <==> repr(x)"
2366 return repr(self.__C)
2369 "x.__str__() <==> str(x)"
2370 return str(self.__C)
    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            # numpy.asmatrix is deprecated in recent NumPy — TODO confirm
            # the supported NumPy range for this module.
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            # Diagonal representation: add the stored variances onto the
            # diagonal of `other`, in place on a flat view.
            _A = numpy.asarray(other)
            if len(_A.shape) == 1:
                # NOTE(review): for 1-D input this touches every other
                # element, not an obvious "diagonal" — confirm the intended
                # semantics against the upstream source (an "else:" line also
                # appears elided just below).
                _A.reshape((-1, 1))[::2] += self.__C
                _A.reshape(_A.size)[::_A.shape[1] + 1] += self.__C
            return numpy.asmatrix(_A)
2384 def __radd__(self, other):
2385 "x.__radd__(y) <==> y+x"
2386 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name, type(other)))
2388 def __sub__(self, other):
2389 "x.__sub__(y) <==> x-y"
2390 if self.ismatrix() or self.isobject():
2391 return self.__C - numpy.asmatrix(other)
2392 elif self.isvector() or self.isscalar():
2393 _A = numpy.asarray(other)
2394 _A.reshape(_A.size)[::_A.shape[1] + 1] = self.__C - _A.reshape(_A.size)[::_A.shape[1] + 1]
2395 return numpy.asmatrix(_A)
2397 def __rsub__(self, other):
2398 "x.__rsub__(y) <==> y-x"
2399 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name, type(other)))
2402 "x.__neg__() <==> -x"
    def __matmul__(self, other):
        "x.__mul__(y) <==> x@y"
        # Matrix product, dispatched on the internal representation and the
        # shape of `other` (vector-like vs. matrix-like).
        # NOTE(review): several "else:" lines before the raise statements
        # appear elided in this extract — confirm against upstream.
        if self.ismatrix() and isinstance(other, (int, float)):
            return numpy.asarray(self.__C) * other
        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]:  # Vector
                return numpy.ravel(self.__C @ numpy.ravel(other))
            elif numpy.asarray(other).shape[0] == self.shape[1]:  # Matrix
                return numpy.asarray(self.__C) @ numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asarray(other).shape, self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            # Diagonal representation: the product reduces to element-wise
            # scaling by the stored variances.
            if numpy.ravel(other).size == self.shape[1]:  # Vector
                return numpy.ravel(self.__C) * numpy.ravel(other)
            elif numpy.asarray(other).shape[0] == self.shape[1]:  # Matrix
                return numpy.ravel(self.__C).reshape((-1, 1)) * numpy.asarray(other)
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.ravel(other).shape, self.__name))
        elif self.isscalar() and isinstance(other, numpy.matrix):
            return numpy.asarray(self.__C * other)
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.ravel(other)
                return self.__C * numpy.asarray(other)
        elif self.isobject():
            return self.__C.__matmul__(other)
            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name, type(other)))
def __mul__(self, other):
    """
    x.__mul__(y) <==> x*y

    Multiply the stored covariance by ``other`` in matrix-product sense,
    dispatching on the internal representation; returns numpy matrices.
    Vector operands are detected by flattened size, matrix operands by
    leading dimension.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return self.__C * other
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[1]:      # vector operand
            return self.__C * numpy.asmatrix(_flat).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # matrix operand
            return self.__C * numpy.asmatrix(other)
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, numpy.asmatrix(other).shape, self.__name))
    elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[1]:      # vector operand
            return numpy.asmatrix(self.__C * _flat).T
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # matrix operand
            # apply the diagonal row-wise via a double transpose
            return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape, _flat.shape, self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return self.__C * other
    elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
        _shape = numpy.asarray(other).shape
        # flatten anything that is effectively one-dimensional
        if len(_shape) == 1 or _shape[1] == 1 or _shape[0] == 1:
            return self.__C * numpy.asmatrix(numpy.ravel(other)).T
        else:
            return self.__C * numpy.asmatrix(other)
    elif self.isobject():
        return self.__C.__mul__(other)
    else:
        raise NotImplementedError(
            "%s covariance matrix __mul__ method not available for %s type!"%(self.__name, type(other)))
def __rmatmul__(self, other):
    """
    x.__rmatmul__(y) <==> y@x

    Reflected matrix product: multiply ``other`` on the left by the
    stored covariance, dispatching on the internal representation.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[1]:      # vector operand
            return numpy.asmatrix(_flat) * self.__C
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # matrix operand
            return numpy.asmatrix(other) * self.__C
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
    elif self.isvector() and isinstance(other, numpy.matrix):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[0]:      # vector operand
            return numpy.asmatrix(_flat * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]:  # matrix operand
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(_flat.shape, self.shape, self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return other * self.__C
    elif self.isobject():
        return self.__C.__rmatmul__(other)
    else:
        raise NotImplementedError(
            "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name, type(other)))
def __rmul__(self, other):
    """
    x.__rmul__(y) <==> y*x

    Reflected multiplication: multiply ``other`` on the left by the
    stored covariance, dispatching on the internal representation.
    """
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[1]:      # vector operand
            return numpy.asmatrix(_flat) * self.__C
        elif numpy.asmatrix(other).shape[0] == self.shape[1]:  # matrix operand
            return numpy.asmatrix(other) * self.__C
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape, self.shape, self.__name))
    elif self.isvector() and isinstance(other, numpy.matrix):
        _flat = numpy.ravel(other)
        if _flat.size == self.shape[0]:      # vector operand
            return numpy.asmatrix(_flat * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]:  # matrix operand
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError(
                "operands could not be broadcast together with shapes %s %s in %s matrix"%(_flat.shape, self.shape, self.__name))
    elif self.isscalar() and isinstance(other, numpy.matrix):
        return other * self.__C
    elif self.isscalar() and isinstance(other, float):
        return other * self.__C
    elif self.isobject():
        return self.__C.__rmul__(other)
    else:
        raise NotImplementedError(
            "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name, type(other)))
def __len__(self):
    """x.__len__() <==> len(x) — the covariance's leading dimension."""
    return self.shape[0]
2530 # ==============================================================================
class Observer2Func(object):
    """
    Build an observer function object from its source text.

    The ``corps`` string is executed verbatim each time the observer is
    triggered, with ``var`` (the observed variable) and ``info`` (the
    attached information) available in its local scope.
    """
    # Fixed as a real one-element tuple: the original "("__corps")" was a
    # bare parenthesized string, which only worked through the
    # single-string fallback of __slots__.
    __slots__ = ("__corps",)

    def __init__(self, corps=""):
        # SECURITY: "corps" is passed to exec() in func() below; it must
        # never come from untrusted input.
        self.__corps = corps

    def func(self, var, info):
        "Observation function: executes the stored source text"
        # "var" and "info" are deliberately exposed to the executed code
        # through the local scope of this call.
        exec(self.__corps)

    def getfunc(self):
        "Return the function pointer held by the object"
        return self.func
2548 # ==============================================================================
class CaseLogger(object):
    """
    Keep a record of the commands used to build a case, and expose
    formatters (viewers) to dump it and parsers (loaders) to reload it.
    """
    __slots__ = (
        "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
        "__loaders")

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []          # ordered list of registered commands
        self.__switchoff = False      # when True, registration is suspended
        self.__viewers = {
            "TUI": Interfaces._TUIViewer,
            "SCD": Interfaces._SCDViewer,
            "YACS": Interfaces._YACSViewer,
            "SimpleReportInRst": Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml": Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt": Interfaces._SimpleReportInPlainTxtViewer,
        }
        self.__loaders = {
            "TUI": Interfaces._TUIViewer,
            "COM": Interfaces._COMViewer,
        }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        __complete = __command is not None and __keys is not None and __local is not None
        if __complete and not self.__switchoff:
            if "self" in __keys:
                __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            # any non-switchoff call re-enables registration
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format not in self.__viewers:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format not in self.__loaders:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        __formater = self.__loaders[__format]()
        return __formater.load(__filename, __content, __object)
2607 # ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction = lambda x: x,
        _mpEnabled = False,
        _mpWorkers = None ):
    """
    For an ordered list of input vectors, return the matching ordered list
    of values of the given function, evaluated either serially or through
    a multiprocessing pool when enabled and available.
    """
    # Initial checks and definitions
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        # None (or any value < 1) lets the pool pick its own worker count
        if _mpWorkers is None or _mpWorkers < 1:
            __mpWorkers = None
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective evaluations
    if __mpEnabled:
        _jobs = __xserie
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        if _extraArguments is None:
            __multiHX = [ _sFunction( __xvalue ) for __xvalue in __xserie ]
        elif isinstance(_extraArguments, (list, tuple, map)):
            __multiHX = [ _sFunction( __xvalue, *_extraArguments ) for __xvalue in __xserie ]
        elif isinstance(_extraArguments, dict):
            __multiHX = [ _sFunction( __xvalue, **_extraArguments ) for __xvalue in __xserie ]
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
2664 # ==============================================================================
2665 if __name__ == "__main__":
2666 print("\n AUTODIAGNOSTIC\n")