1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
# Module author metadata tag (informational only; not used programmatically here).
__author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence
38 from daCore import PlatformInfo
39 from daCore import Interfaces
40 from daCore import Templates
42 # ==============================================================================
class CacheManager(object):
    General class for managing a cache of calculations
    # __slots__ entries (tuple delimiters elided in this view)
        "__tolerBP", "__lengthOR", "__initlnOR", "__seenNames", "__enabled",
    # Constructor keyword defaults (def __init__ header elided in this view)
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy = -1,
        The tolerance characteristics can be modified at creation time.
        # Relative tolerance used by the redundancy (cache-hit) test
        self.__tolerBP = float(toleranceInRedundancy)
        # Maximum number of cached entries; -1 means "auto-size on first store"
        self.__lengthOR = int(lengthOfRedundancy)
        # Initial length, kept so the cache size can later be reset
        self.__initlnOR = self.__lengthOR

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check whether a calculation corresponding to the value already exists"
        # Scan the cache backwards (most recent entries first). Each cached
        # entry is a tuple (point, value, norm, operator name) — see
        # storeValueInX below. Rejection-branch bodies are elided in this view.
        for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
                # Not an array-like value: cannot be compared (body elided)
            elif (str(oName) != self.__listOPCV[i][3]):
                # Entry belongs to a different operator (body elided)
            elif (xValue.size != self.__listOPCV[i][0].size):
                # Size mismatch: cannot be the same point (body elided)
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
                # Cheap signed first-component pre-test rejects the entry before
                # the full norm computation below (body elided).
                # NOTE(review): the difference is not taken in absolute value,
                # so a large negative difference falls through to the norm
                # test — presumably just a missed fast-path, confirm upstream.
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                # Cache hit: distance below the tolerance scaled by the stored norm
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a calculation Hx corresponding to the value x"
        if self.__lengthOR < 0:
            # First store ever: auto-size the cache from the state dimension
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator is new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Evict the oldest entries once the cache exceeds its capacity
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue), # 1 Previous value
            numpy.linalg.norm(xValue), # 2 Norm
            str(oName), # 3 Operator name
        # disable() body (method header elided): remember the current length
        # and switch the cache off
        self.__initlnOR = self.__lengthOR
        self.__enabled = False
        # enable() body (method header elided): restore the length and switch
        # the cache back on
        self.__lengthOR = self.__initlnOR
        self.__enabled = True
121 # ==============================================================================
class Operator(object):
    General interface class for a simple operator type
    # __slots__ entries (tuple delimiters elided in this view)
        "__name", "__NbCallsAsMatrix", "__NbCallsAsMethod",
        "__NbCallsOfCached", "__reduceM", "__avoidRC", "__inputAsMF",
        "__mpEnabled", "__extraArgs", "__Method", "__Matrix", "__Type",
    # Constructor keyword defaults (def __init__ header and the fromMethod/
    # fromMatrix keywords are elided in this view)
                 name = "GenericOperator",
                 avoidingRedundancy = True,
                 reducingMemoryUse = False,
                 inputAsMultiFunction = False,
                 enableMultiProcess = False,
                 extraArguments = None,
        An object of this type is built by providing, through one of the two
        keywords, either a Python function or multi-function, or a
        - name : operator name
        - fromMethod : argument of Python function type
        - fromMatrix : argument suitable for the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant calculations
        - reducingMemoryUse : boolean forcing (or not) calculations with a
          smaller memory footprint
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        self.__name = str(name)
        # Local call counters: as matrix, as method, and served from cache
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Already a multi-function: use it directly
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function in the MultiFonction adapter so it can be
            # called on a series of points (optionally in parallel)
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Linear operator given as a matrix (possibly a string to parse)
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Switch the shared calculation cache off (docstring elided in this view)
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): despite the method name, the only visible statement
        # calls CM.disable(); this is presumably the elided "else:" branch of
        # a test on self.__avoidRC, with the enabling branch elided — confirm
        # against the full source before assuming a bug.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Returns the result of applying the operator to a series of arguments
        xValue. This method simply applies the operator, each argument being
        expected to already be of the proper type.
        - the arguments per series are:
            - xValue : argument suitable for applying the operator
            - HValue : precomputed value of the operator at this point
            - argsAsSerie : indicates whether the arguments are mono or multi-valued
        if HValue is not None:
            # Precomputed values were supplied (normalization lines elided)
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Reuse the precomputed values instead of evaluating the operator
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                # Feed the cache so later calls can be served from it
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
        for i, xv in enumerate(_xValue):
            # Try the cache first (the guarding if/else on self.__avoidRC is
            # elided in this view — both assignments below are alternatives)
            __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
            __alreadyCalculated = False
            if __alreadyCalculated:
                self.__addOneCacheCall()
            if self.__Matrix is not None:
                # Linear case: direct matrix-vector product
                self.__addOneMatrixCall()
                _hv = self.__Matrix @ numpy.ravel(xv)
                # Non-linear case (elided "else:"): count the call; evaluation
                # is deferred to the batched multi-function call below
                self.__addOneMethodCall()
            _HxValue.append( _hv )
        if len(_xserie)>0 and self.__Matrix is None:
            # Evaluate all pending points in a single multi-function call
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF evaluation
                # (elided "else:") same call with the extra arguments
                _hserie = self.__Method( _xserie, self.__extraArgs ) # MF evaluation
            if not hasattr(_hserie, "pop"):
                # Part of an elided "raise ValueError(" — runtime message kept verbatim
                "The user input multi-function doesn't seem to return a"+\
                " result sequence, behaving like a mono-function. It has"+\
                # Store each freshly computed pair in the cache (loop elided)
                Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Pack the series as columns of a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Returns the result of applying the operator to pairs
        (xValue, uValue). This method simply applies the operator, its
        argument being expected to already be of the proper type. If uValue
        is None, the operator is assumed to apply to xValue only.
        - paires : the paired arguments are:
            - xValue : X argument suitable for applying the operator
            - uValue : U argument suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        if argsAsSerie: _xuValue = paires
        else: _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        if self.__Matrix is not None:
            # Linear case: apply the matrix to each X (U is ignored here)
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Function case (elided "else:"): collect the arguments, keeping
            # the (X, U) pair only when a control U is actually given
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    # (elided "else:") no control: pass X alone
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF evaluation
                # (elided "else:") same call with the extra arguments
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF evaluation
        if returnSerieAsArrayMatrix:
            # Pack the series as columns of a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        Returns the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        simply applies the operator, its argument being expected to already
        be of the proper type. If the operator is linear (a matrix), it is
        valid at every nominal point, so xNominal may be anything. There is
        a single pair by default, and argsAsSerie indicates that the
        argument is multi-pair.
        - paires : the paired arguments are:
            - xNominal : series of arguments giving the point where the
              operator is built, to then be applied
            - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicates whether the argument is mono or multi-valued
        if argsAsSerie: _nxValue = paires
        else: _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Function case (elided "else:"): one batched MF call on the pairs
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF evaluation
                # (elided "else:") same call with the extra arguments
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF evaluation
        if returnSerieAsArrayMatrix:
            # Pack the series as columns of a single 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else: return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        Return the operator in matrix form
        if self.__Matrix is not None:
            # Already a matrix: return it directly (wrapped in a 1-element list)
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            # Function form: evaluate at the given operating point(s)
            # (the argsAsSerie sub-branching lines are elided in this view)
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( self.__Method(((_vfmf, None),)) )
            # (elided "else:") single operating point
            self.__addOneMethodCall()
            mValue = self.__Method(((ValueForMethodForm, None),))
            # Part of an elided "else: raise" — no operating point was given
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else: return mValue[-1]

    # Body of a shape-accessor method (def header elided in this view)
        Return the size in numpy form if the operator is available in
        matrix form
        if self.__Matrix is not None:
            return self.__Matrix.shape
        # (elided "else:") no matrix form available
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        Return the numbers of operator evaluations
        # Tuple of counters (construction header elided): local counters for
        # this instance first, then the class-wide global counters
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Count one call"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Count one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
426 # ==============================================================================
class FullOperator(object):
    General interface class for a full operator type
    (Direct, Linear Tangent, Adjoint)
    # __slots__ entries (tuple delimiters elided in this view)
        "__name", "__check", "__extraArgs", "__FO", "__T",
    # Constructor keyword defaults (def __init__ header and asMatrix /
    # appliedInX / toBeChecked / scheduledBy keywords are elided in this view)
                 name = "GenericFullOperator",
                 asOneFunction = None, # 1 function
                 asThreeFunctions = None, # 3 functions in a dictionary
                 asScript = None, # 1 or 3 function(s) given by script
                 asDict = None, # parameters
                 extraArguments = None,
                 performancePrf = None,
                 inputAsMF = False,# Function(s) as multi-functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        # Merge user parameters into the (elided) default parameter dict
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority given to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Derivatives and evaluation multiprocessing are mutually exclusive
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary backward-compatibility alias
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        #
        if asScript is not None:
            __Matrix, __Function = None, None
            # (branch conditions elided) the script provides either a matrix...
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            # ...or a single Direct function with approximated derivatives...
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # ...or the three operators (dict construction header elided)
                "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # (elided "else:" — operators given directly, not through a script)
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    # (elided "else:" + "raise ValueError(" header; message verbatim)
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                # (elided "else:" — a bare callable was given)
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Full Tangent/Adjoint pair given explicitly and derivatives
                # not requested to be approximated
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    # Only Direct usable: fall back to approximated derivatives
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                # (elided "else:" + "raise ValueError(" header; message verbatim)
                        "The functions has to be given in a dictionnary which have either"+\
                        " 1 key (\"Direct\") or"+\
                        " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # The tangent can stand in for the direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Normalize the operating-point argument into a dict keyed by "HXb"
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            # Time schedule for the operator (semantics defined elsewhere)
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Case 1: Direct only — build Tangent/Adjoint by finite differences.
            # Fill in the FDA defaults for any option the user did not set.
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            # Local import to avoid a circular dependency at module load time
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy = __Function["withLengthOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator(
                # (name keyword line elided in this view)
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["DifferentialIncrement"] = __Function["DifferentialIncrement"]
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Case 2: the three operators are given explicitly as functions
            self.__FO["Direct"] = Operator(
                # (name keyword line elided in this view)
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["DifferentialIncrement"] = None
        elif asMatrix is not None:
            # Case 3: linear operator given as a matrix
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                # (name keyword line elided in this view)
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T, # adjoint of a real matrix = transpose
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["DifferentialIncrement"] = None
        # (elided "else: raise ValueError(" header; message verbatim)
            "The %s object is improperly defined or undefined,"%self.__name+\
            " it requires at minima either a matrix, a Direct operator for"+\
            " approximate derivatives or a Tangent/Adjoint operators pair."+\
            " Please check your operator input.")
        # Store the operating point(s) as column vectors, parsing any strings
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
        # (elided "else:") no operating point given
            self.__FO["AppliedInX"] = None

    # Body of __repr__ (def line elided in this view)
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

    # Body of __str__ (def line elided in this view)
        "x.__str__() <==> str(x)"
        return str(self.__FO)
669 # ==============================================================================
670 class Algorithm(object):
672 Classe générale d'interface de type algorithme
674 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
675 d'assimilation, en fournissant un container (dictionnaire) de variables
676 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
678 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
681 "_name", "_parameters", "__internal_state", "__required_parameters",
682 "_m", "__variable_names_not_public", "__canonical_parameter_name",
683 "__canonical_stored_name", "__replace_by_the_new_name",
687 def __init__(self, name):
689 L'initialisation présente permet de fabriquer des variables de stockage
690 disponibles de manière générique dans les algorithmes élémentaires. Ces
691 variables de stockage sont ensuite conservées dans un dictionnaire
692 interne à l'objet, mais auquel on accède par la méthode "get".
694 Les variables prévues sont :
695 - APosterioriCorrelations : matrice de corrélations de la matrice A
696 - APosterioriCovariance : matrice de covariances a posteriori : A
697 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
698 - APosterioriVariances : vecteur des variances de la matrice A
699 - Analysis : vecteur d'analyse : Xa
700 - BMA : Background moins Analysis : Xa - Xb
701 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
702 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
703 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
704 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
705 - CostFunctionJo : partie observations de la fonction-coût : Jo
706 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
707 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
708 - CurrentOptimum : état optimal courant lors d'itérations
709 - CurrentState : état courant lors d'itérations
710 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
711 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
712 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
713 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
714 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
715 - ForecastState : état prédit courant lors d'itérations
716 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
717 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
718 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
719 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
720 - Innovation : l'innovation : d = Y - H(X)
721 - InnovationAtCurrentAnalysis : l'innovation à l'état analysé : da = Y - H(Xa)
722 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
723 - InternalCostFunctionJ : ensemble de valeurs internes de fonction-coût J dans un vecteur
724 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jb dans un vecteur
725 - InternalCostFunctionJb : ensemble de valeurs internes de fonction-coût Jo dans un vecteur
726 - InternalStates : ensemble d'états internes rangés par colonne dans une matrice (=EnsembleOfSnapshots)
727 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
728 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
729 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
730 - KalmanGainAtOptimum : gain de Kalman à l'optimum
731 - MahalanobisConsistency : indicateur de consistance des covariances
732 - OMA : Observation moins Analyse : Y - Xa
733 - OMB : Observation moins Background : Y - Xb
734 - ReducedCoordinates : coordonnées dans la base réduite
735 - Residu : dans le cas des algorithmes de vérification
736 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
737 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
738 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
739 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
740 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
741 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
742 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
743 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
744 - SingularValues : valeurs singulières provenant d'une décomposition SVD
745 On peut rajouter des variables à stocker dans l'initialisation de
746 l'algorithme élémentaire qui va hériter de cette classe
748 logging.debug("%s Initialisation", str(name))
749 self._m = PlatformInfo.SystemUsage()
751 self._name = str( name )
752 self._parameters = {"StoreSupplementaryCalculations":[]}
753 self.__internal_state = {}
754 self.__required_parameters = {}
755 self.__required_inputs = {
756 "RequiredInputValues":{"mandatory":(), "optional":()},
757 "ClassificationTags":[],
759 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
760 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
761 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
762 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
764 self.StoredVariables = {}
765 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
766 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
767 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
768 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
769 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
770 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
771 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
772 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
773 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
774 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
775 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
776 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
777 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
778 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
779 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
780 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
781 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
782 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrice(name = "EnsembleOfSimulations")
783 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrice(name = "EnsembleOfSnapshots")
784 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrice(name = "EnsembleOfStates")
785 self.StoredVariables["ExcludedPoints"] = Persistence.OneVector(name = "ExcludedPoints")
786 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
787 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
788 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
789 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
790 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
791 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
792 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
793 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
794 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
795 self.StoredVariables["InternalCostFunctionJ"] = Persistence.OneVector(name = "InternalCostFunctionJ")
796 self.StoredVariables["InternalCostFunctionJb"] = Persistence.OneVector(name = "InternalCostFunctionJb")
797 self.StoredVariables["InternalCostFunctionJo"] = Persistence.OneVector(name = "InternalCostFunctionJo")
798 self.StoredVariables["InternalStates"] = Persistence.OneMatrix(name = "InternalStates")
799 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
800 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
801 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
802 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
803 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
804 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
805 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
806 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
807 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
808 self.StoredVariables["ReducedCoordinates"] = Persistence.OneVector(name = "ReducedCoordinates")
809 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
810 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
811 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
812 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
813 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
814 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
815 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
816 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
817 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
818 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
819 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
820 self.StoredVariables["SingularValues"] = Persistence.OneVector(name = "SingularValues")
822 for k in self.StoredVariables:
823 self.__canonical_stored_name[k.lower()] = k
825 for k, v in self.__variable_names_not_public.items():
826 self.__canonical_parameter_name[k.lower()] = k
827 self.__canonical_parameter_name["algorithm"] = "Algorithm"
828 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# NOTE(review): this listing is a gapped extraction (original lines such as 831,
# 835, 845, 850, 852, 857-858, 913, 923 are absent and indentation is flattened).
# Code below is kept byte-identical; only comments are added.
# Purpose: common pre-processing run by every algorithm before `run()`:
# parameter normalisation, input checking, bounds/initial-point corrections,
# and verbosity setup.
830 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
832 logging.debug("%s Lancement", self._name)
833 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
# Reset the cpu/elapsed time reference for this run.
834 self._getTimeState(reset=True)
836 # Mise à jour des paramètres internes avec le contenu de Parameters, en
837 # reprenant les valeurs par défauts pour toutes celles non définies
838 self.__setParameters(Parameters, reset=True)
# Non-public bookkeeping variables (e.g. "nextStep") get their defaults
# only when the caller did not already supply them.
839 for k, v in self.__variable_names_not_public.items():
840 if k not in self._parameters: self.__setParameters( {k:v} )
842 # Corrections et compléments des vecteurs
# Local checker: raises for a missing mandatory vector, otherwise only logs.
# (The branch testing whether `argument` is unset sits on a missing line.)
843 def __test_vvalue(argument, variable, argname, symbol=None):
844 if symbol is None: symbol = variable
846 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
847 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
848 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
849 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
851 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
853 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
854 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
855 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
856 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
859 "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
860 self._name,argname,symbol,numpy.array(argument).size))
862 __test_vvalue( Xb, "Xb", "Background or initial state" )
863 __test_vvalue( Y, "Y", "Observation" )
864 __test_vvalue( U, "U", "Control" )
866 # Corrections et compléments des covariances
# Same check pattern for covariance matrices B, R, Q (no size logged).
867 def __test_cvalue(argument, variable, argname, symbol=None):
868 if symbol is None: symbol = variable
870 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
871 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
872 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
873 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
875 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
877 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
878 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
879 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
880 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
882 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
884 __test_cvalue( B, "B", "Background" )
885 __test_cvalue( R, "R", "Observation" )
886 __test_cvalue( Q, "Q", "Evolution" )
888 # Corrections et compléments des opérateurs
# Same check pattern for operators HO, EM, CM; an operator given as an
# empty dict counts as "not set".
889 def __test_ovalue(argument, variable, argname, symbol=None):
890 if symbol is None: symbol = variable
891 if argument is None or (isinstance(argument,dict) and len(argument)==0):
892 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
893 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
894 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
895 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
897 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
899 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
900 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
901 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
902 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
904 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
906 __test_ovalue( HO, "HO", "Observation", "H" )
907 __test_ovalue( EM, "EM", "Evolution", "M" )
908 __test_ovalue( CM, "CM", "Control Model", "C" )
910 # Corrections et compléments des bornes
# "Bounds" is normalised to None when absent/empty; note the comment below:
# "StateBoundsForQuantiles" deliberately has NO None default.
911 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
912 logging.debug("%s Bounds taken into account"%(self._name,))
914 self._parameters["Bounds"] = None
915 if ("StateBoundsForQuantiles" in self._parameters) \
916 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
917 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
918 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
919 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
921 # Corrections et compléments de l'initialisation en X
# A forced initial point must match the background size; otherwise the
# background itself (flattened) becomes the initialization point.
922 if "InitializationPoint" in self._parameters:
924 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
925 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
926 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
927 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
928 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
930 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
932 if self._parameters["InitializationPoint"] is None:
933 raise ValueError("Forced initial point can not be set without any given Background or required value")
935 # Correction pour pallier a un bug de TNC sur le retour du Minimum
# Workaround: with the TNC minimizer, internal variables must be stored to
# recover the true minimum.
936 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
937 self.setParameterValue("StoreInternalVariables",True)
939 # Verbosité et logging
# Optimizer chattiness follows the global logging level (DEBUG/INFO => verbose).
940 if logging.getLogger().level < logging.WARNING:
941 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
942 self._parameters["optmessages"] = 15
944 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
945 self._parameters["optmessages"] = 0
# NOTE(review): gapped listing (original lines 950, 963, 966 — docstring and
# the logging.debug( lead-ins — are absent). Code kept byte-identical.
# Purpose: common post-processing — derive a-posteriori variance/std-dev/
# correlation products from each stored APosterioriCovariance matrix, then
# log operator call counts and timing.
949 def _post_run(self,_oH=None):
951 if ("StoreSupplementaryCalculations" in self._parameters) and \
952 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
953 for _A in self.StoredVariables["APosterioriCovariance"]:
954 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
# Variances are the diagonal of the covariance matrix.
955 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
956 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
957 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
958 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 * A * D^-1/2 with D = diag(A).
959 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
960 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
961 self.StoredVariables["APosterioriCorrelations"].store( _C )
962 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
964 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
965 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
967 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
968 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
969 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
970 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
971 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "Return True when *key* was requested through StoreSupplementaryCalculations."
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
# NOTE(review): gapped listing — docstring delimiters, `if key is not None:`
# (orig 986), `else:` branches, the `def keys(self):` line (orig 998) and the
# tail of `pop` (orig 1009-1012, 1015-1018) are absent. Code kept byte-identical.
# Dict-like access to StoredVariables, with case-insensitive key resolution
# via the __canonical_stored_name mapping.
978 def get(self, key=None):
980 Renvoie l'une des variables stockées identifiée par la clé, ou le
981 dictionnaire de l'ensemble des variables disponibles en l'absence de
982 clé. Ce sont directement les variables sous forme objet qui sont
983 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
984 des classes de persistance.
987 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
989 return self.StoredVariables
991 def __contains__(self, key=None):
992 "D.__contains__(k) -> True if D has a key k, else False"
# Unknown or None keys are handled before the canonical lookup below.
993 if key is None or key.lower() not in self.__canonical_stored_name:
996 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
999 "D.keys() -> list of D's keys"
1000 if hasattr(self, "StoredVariables"):
1001 return self.StoredVariables.keys()
1005 def pop(self, k, d):
1006 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
# Case-insensitive pop; falls back to default *d* like dict.pop.
1007 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
1008 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
1013 raise TypeError("pop expected at least 1 arguments, got 0")
1014 "If key is not found, d is returned if given, otherwise KeyError is raised"
# Abstract hook: every concrete algorithm must override run() with the
# actual computation.
1020 def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
1022 Doit implémenter l'opération élémentaire de calcul algorithmique.
1024 raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
# NOTE(review): gapped listing — the parameter list of defineRequiredParameter
# (orig 1027-1036) and several dict-entry lines (orig 1047-1048) are absent.
# Code kept byte-identical.
# Registers a required algorithm parameter with its default, typecast,
# validation lists and message, and records its case-insensitive name.
1026 def defineRequiredParameter(self,
1038 Permet de définir dans l'algorithme des paramètres requis et leurs
1039 caractéristiques par défaut.
1042 raise ValueError("A name is mandatory to define a required parameter.")
1044 self.__required_parameters[name] = {
1045 "default" : default,
1046 "typecast" : typecast,
1049 "listval" : listval,
1050 "listadv" : listadv,
1051 "message" : message,
1052 "oldname" : oldname,
# Case-insensitive canonical name; an old (deprecated) name maps to the new one.
1054 self.__canonical_parameter_name[name.lower()] = name
1055 if oldname is not None:
1056 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1057 self.__replace_by_the_new_name[oldname.lower()] = name
# Logging evaluates the default through setParameterValue (validated value).
1058 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
1060 def getRequiredParameters(self, noDetails=True):
1062 Renvoie la liste des noms de paramètres requis ou directement le
1063 dictionnaire des paramètres requis.
# noDetails=True -> sorted list of names; otherwise the full description dict.
1066 return sorted(self.__required_parameters.keys())
1068 return self.__required_parameters
# NOTE(review): gapped listing — docstring delimiters, the `__val = None`
# branch (orig 1083), `else:`/`try:`/`except:` lines (orig 1087, 1089-1092)
# and the SetSeed/return tail (orig 1110-1112) are absent. Code kept
# byte-identical.
# Validates and returns the value of a required parameter: applies the
# typecast, then min/max bounds, then allowed-value lists.
1070 def setParameterValue(self, name=None, value=None):
1072 Renvoie la valeur d'un paramètre requis de manière contrôlée
# Resolve the case-insensitive canonical parameter name first.
1074 __k = self.__canonical_parameter_name[name.lower()]
1075 default = self.__required_parameters[__k]["default"]
1076 typecast = self.__required_parameters[__k]["typecast"]
1077 minval = self.__required_parameters[__k]["minval"]
1078 maxval = self.__required_parameters[__k]["maxval"]
1079 listval = self.__required_parameters[__k]["listval"]
1080 listadv = self.__required_parameters[__k]["listadv"]
# Value resolution: explicit value wins, else the declared default,
# each optionally passed through the typecast callable.
1082 if value is None and default is None:
1084 elif value is None and default is not None:
1085 if typecast is None: __val = default
1086 else: __val = typecast( default )
1088 if typecast is None: __val = value
1091 __val = typecast( value )
1093 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bounds are checked element-wise via numpy, so list values are supported.
1095 if minval is not None and (numpy.array(__val, float) < minval).any():
1096 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1097 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1098 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Allowed values: listval is the public list, listadv the advanced
# (undocumented) one; sequence values are checked item by item.
1099 if listval is not None or listadv is not None:
1100 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1102 if listval is not None and v in listval: continue
1103 elif listadv is not None and v in listadv: continue
1105 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1106 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1107 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# SetSeed gets special handling (tail of the method on missing lines).
1109 if __k in ["SetSeed",]:
# NOTE(review): gapped listing — docstring delimiter lines are absent.
# Code kept byte-identical.
# Declares which of Xb/Y/U/HO/EM/CM/B/R/Q are mandatory vs optional inputs;
# _pre_run checks incoming arguments against these tuples.
1114 def requireInputArguments(self, mandatory=(), optional=()):
1116 Permet d'imposer des arguments de calcul requis en entrée.
1118 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1119 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Accessor for the (mandatory, optional) input-name tuples.
1121 def getInputArguments(self):
1123 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1125 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# Appends classification tags and always returns the current full list,
# so calling it with no argument acts as a getter.
1127 def setAttributes(self, tags=()):
1129 Permet d'adjoindre des attributs comme les tags de classification.
1130 Renvoie la liste actuelle dans tous les cas.
1132 self.__required_inputs["ClassificationTags"].extend( tags )
1133 return self.__required_inputs["ClassificationTags"]
# NOTE(review): gapped listing — docstring delimiters, the `if reset:` branch
# (orig 1138 area), `else:` lines (orig 1146?, 1153, 1157, 1159-1160, 1163)
# are absent. Code kept byte-identical.
# Merges a caller-supplied parameter dict into self._parameters, warns on
# deprecated (old) parameter names, then (re)validates every required
# parameter through setParameterValue.
1135 def __setParameters(self, fromDico={}, reset=False):
1137 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1139 self._parameters.update( fromDico )
# Map canonical parameter name -> key actually used by the caller,
# to support case-insensitive and deprecated spellings.
1140 __inverse_fromDico_keys = {}
1141 for k in fromDico.keys():
1142 if k.lower() in self.__canonical_parameter_name:
1143 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1144 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1145 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Emit a FutureWarning for each deprecated parameter name actually used.
1147 for k in __inverse_fromDico_keys.values():
1148 if k.lower() in self.__replace_by_the_new_name:
1149 __newk = self.__replace_by_the_new_name[k.lower()]
1150 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1151 __msg += " Please update your code."
# stacklevel=50 pushes the warning attribution far up the call stack,
# presumably to reach user code — TODO confirm intent.
1152 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Each declared parameter is set from the caller value when given,
# otherwise from its validated default.
1154 for k in self.__required_parameters.keys():
1155 if k in __canonic_fromDico_keys:
1156 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1158 self._parameters[k] = self.setParameterValue(k)
# Long values are logged by length only, to keep the log readable.
1161 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1162 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1164 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# NOTE(review): gapped listing — docstring delimiters, `if reset:`/`else:`
# lines (orig 1189, 1192-1193, 1205) and the return tail of _StopOnTimeLimit
# (orig 1207-1211) are absent. Code kept byte-identical.
# Stores named variables making up the algorithm's internal (restart) state.
1166 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1168 Permet de stocker des variables nommées constituant l'état interne
1170 if reset: # Vide le dictionnaire préalablement
1171 self.__internal_state = {}
1172 if key is not None and value is not None:
1173 self.__internal_state[key] = value
# A whole dict of state variables can be merged in one call.
1174 self.__internal_state.update( dict(fromDico) )
# Returns one named state variable, or the whole state dict without a key.
1176 def _getInternalState(self, key=None):
1178 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1180 if key is not None and key in self.__internal_state:
1181 return self.__internal_state[key]
1183 return self.__internal_state
# reset=True stores the time origin; otherwise returns (cpu, elapsed)
# seconds since that origin.
1185 def _getTimeState(self, reset=False):
1187 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1190 self.__initial_cpu_time = time.process_time()
1191 self.__initial_elapsed_time = time.perf_counter()
1194 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1195 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1196 return self.__cpu_time, self.__elapsed_time
# Stop criterion usable as an optimizer callback: True when either
# MaximumCpuTime or MaximumElapsedTime is exceeded.
1198 def _StopOnTimeLimit(self, X=None, withReason=False):
1199 "Stop criteria on time limit: True/False [+ Reason]"
1200 c, e = self._getTimeState()
1201 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1202 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1203 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1204 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1206 __SC, __SR = False, ""
1212 # ==============================================================================
# NOTE(review): gapped listing — class docstring delimiters, the tail of
# __slots__ (orig 1221-1222), and `else:` branches of get() are absent.
# Code kept byte-identical.
# Lightweight stand-in for "Algorithm": same storage interface
# (StoredVariables, _toStore, get) but none of the validation machinery.
1213 class PartialAlgorithm(object):
1215 Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1216 action avancée comme la vérification . Pour les méthodes reprises ici,
1217 le fonctionnement est identique à celles de la classe "Algorithm".
1220 "_name", "_parameters", "StoredVariables", "__canonical_stored_name",
1223 def __init__(self, name):
1224 self._name = str( name )
1225 self._parameters = {"StoreSupplementaryCalculations":[]}
# Only the small core set of stored variables is pre-registered here,
# unlike the full Algorithm class.
1227 self.StoredVariables = {}
1228 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1229 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1230 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1231 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1232 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1233 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
# Case-insensitive lookup table, mirroring Algorithm's behaviour.
1235 self.__canonical_stored_name = {}
1236 for k in self.StoredVariables:
1237 self.__canonical_stored_name[k.lower()] = k
1239 def _toStore(self, key):
1240 "True if in StoreSupplementaryCalculations, else False"
1241 return key in self._parameters["StoreSupplementaryCalculations"]
1243 def get(self, key=None):
1245 Renvoie l'une des variables stockées identifiée par la clé, ou le
1246 dictionnaire de l'ensemble des variables disponibles en l'absence de
1247 clé. Ce sont directement les variables sous forme objet qui sont
1248 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1249 des classes de persistance.
1252 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1254 return self.StoredVariables
1256 # ==============================================================================
# NOTE(review): gapped listing — the `def __init__(self,` line (around orig
# 1266-1267), several keyword parameters, docstring delimiters and `else:`
# lines are absent. Code kept byte-identical.
# Facade binding a chosen algorithm with its parameters and input objects;
# drives execution (Python or YACS scheme).
1257 class AlgorithmAndParameters(object):
1259 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1262 "__name", "__algorithm", "__algorithmFile", "__algorithmName", "__A",
1263 "__P", "__Xb", "__Y", "__U", "__HO", "__EM", "__CM", "__B", "__R",
1264 "__Q", "__variable_names_not_public",
1268 name = "GenericAlgorithm",
1275 self.__name = str(name)
# The algorithm slot stays an empty dict until __setAlgorithm succeeds.
1279 self.__algorithm = {}
1280 self.__algorithmFile = None
1281 self.__algorithmName = None
1283 self.updateParameters( asDict, asScript )
# Algorithm choice may come from a script file or directly as a name.
1285 if asAlgorithm is None and asScript is not None:
1286 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1288 __Algo = asAlgorithm
1290 if __Algo is not None:
1291 self.__A = str(__Algo)
1292 self.__P.update( {"Algorithm":self.__A} )
1294 self.__setAlgorithm( self.__A )
1296 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# NOTE(review): gapped listing — the keyword parameters of updateParameters
# (orig 1299-1301) and the `else:` branch (orig 1305-1307) are absent.
# Code kept byte-identical.
# Merges parameters from an explicit dict or from a "Parameters" variable
# read out of a script file.
1298 def updateParameters(self,
1302 "Mise à jour des paramètres"
1303 if asDict is None and asScript is not None:
1304 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1308 if __Dict is not None:
1309 self.__P.update( dict(__Dict) )
# NOTE(review): gapped listing — the argument lines of the final
# self.__algorithm.run( call (orig 1337-1345 and its closing) are absent.
# Code kept byte-identical.
# Unpacks the assimilation case objects from asDictAO (unwrapping each via
# .getO() when available), validates shapes, then runs the algorithm.
1311 def executePythonScheme(self, asDictAO = None):
1312 "Permet de lancer le calcul d'assimilation"
# Clear the operator cache before a fresh computation.
1313 Operator.CM.clearCache()
1315 if not isinstance(asDictAO, dict):
1316 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Background falls back to CheckingPoint, then to None.
1317 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1318 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1319 else: self.__Xb = None
1320 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1321 else: self.__Y = asDictAO["Observation"]
1322 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1323 else: self.__U = asDictAO["ControlInput"]
1324 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1325 else: self.__HO = asDictAO["ObservationOperator"]
1326 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1327 else: self.__EM = asDictAO["EvolutionModel"]
1328 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1329 else: self.__CM = asDictAO["ControlModel"]
# Error covariances are taken as given (no .getO() unwrapping here).
1330 self.__B = asDictAO["BackgroundError"]
1331 self.__R = asDictAO["ObservationError"]
1332 self.__Q = asDictAO["EvolutionError"]
# Consistency check of all vector/matrix/operator sizes before running.
1334 self.__shape_validate()
1336 self.__algorithm.run(
1346 Parameters = self.__P,
# NOTE(review): gapped listing — `import pilot/loader` lines, `try:` lines,
# several `return`/`sys.exit` style lines (orig 1354, 1363-1364, 1366, 1368,
# 1372, 1375-1378, 1382, 1387-1388, 1391, 1397, 1399, 1402-1404) are absent.
# Code kept byte-identical.
# Loads and executes a YACS XML scheme inside a SALOME session; pure I/O,
# errors are reported on stdout.
1350 def executeYACSScheme(self, FileName=None):
1351 "Permet de lancer le calcul d'assimilation"
1352 if FileName is None or not os.path.exists(FileName):
1353 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1355 __file = os.path.abspath(FileName)
1356 logging.debug("The YACS file name is \"%s\"."%__file)
# Requires a full SALOME + YACS + ADAO environment to be loaded.
1357 if not PlatformInfo.has_salome or \
1358 not PlatformInfo.has_yacs or \
1359 not PlatformInfo.has_adao:
1360 raise ImportError("\n\n"+\
1361 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1362 "Please load the right environnement before trying to use it.\n")
1365 import SALOMERuntime
1367 SALOMERuntime.RuntimeSALOME_setRuntime()
1369 r = pilot.getRuntime()
1370 xmlLoader = loader.YACSLoader()
1371 xmlLoader.registerProcCataLoader()
# The scheme file itself may also carry a proc catalog.
1373 catalogAd = r.loadCatalog("proc", __file)
1374 r.addCatalog(catalogAd)
1379 p = xmlLoader.load(__file)
1380 except IOError as ex:
1381 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Parsing, validity and consistency are each checked before execution.
1383 logger = p.getLogger("parser")
1384 if not logger.isEmpty():
1385 print("The imported YACS XML schema has errors on parsing:")
1386 print(logger.getStr())
1389 print("The YACS XML schema is not valid and will not be executed:")
1390 print(p.getErrorReport())
1392 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1393 p.checkConsistency(info)
1394 if info.areWarningsOrErrors():
1395 print("The YACS XML schema is not coherent and will not be executed:")
1396 print(info.getGlobalRepr())
1398 e = pilot.ExecutorSwig()
1400 if p.getEffectiveState() != pilot.DONE:
1401 print(p.getErrorReport())
def get(self, key = None):
    """
    Return the stored variable or parameter identified by *key*: the lookup
    tries the wrapped algorithm's stored variables first, then the parameter
    dictionary. Without a key, return a dictionary of all parameters with
    the non-public bookkeeping names removed.

    Fix: the no-key branch previously aliased the internal parameter
    dictionary and pop()'d entries from it, silently mutating the object's
    own state on every call; it now prunes a shallow copy instead.
    """
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
    else:
        # Shallow copy so that removing non-public names does not alter
        # the internal parameter dictionary.
        allvariables = dict(self.__P)
        for k in self.__variable_names_not_public:
            allvariables.pop(k, None)
        return allvariables
def pop(self, k, d):
    "Delegate to the wrapped algorithm's pop (required for pickling support)."
    wrapped = self.__algorithm
    return wrapped.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the required parameters declared by the wrapped algorithm."
    wrapped = self.__algorithm
    return wrapped.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    "Return the (mandatory, optional) input names required by the wrapped algorithm."
    wrapped = self.__algorithm
    return wrapped.getInputArguments()
def getAlgorithmAttributes(self):
    "Return the wrapped algorithm's attribute (classification tag) list."
    # Calling setAttributes() with no tags acts as a getter: it extends the
    # tag list with nothing and returns the current list.
    wrapped = self.__algorithm
    return wrapped.setAttributes()
# NOTE(review): gapped listing — several call-argument lines of
# setDataObserver/removeDataObserver (orig 1439, 1441-1442, 1444-1445,
# 1453, 1455-1458) and the `return False` lines of hasObserver (orig 1463,
# 1465) are absent. Code kept byte-identical.
# Attaches a data observer (hook __O with parameters __I, scheduling __S)
# to the stored variable named __V of the wrapped algorithm.
1432 def setObserver(self, __V, __O, __I, __S):
# Guard: an algorithm must be selected (self.__algorithm is {} until then).
1433 if self.__algorithm is None \
1434 or isinstance(self.__algorithm, dict) \
1435 or not hasattr(self.__algorithm,"StoredVariables"):
1436 raise ValueError("No observer can be build before choosing an algorithm.")
1437 if __V not in self.__algorithm:
1438 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1440 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1443 HookParameters = __I,
# Removes observer __O from stored variable __V (__A: remove-all flag,
# forwarded on a missing line).
1446 def removeObserver(self, __V, __O, __A = False):
1447 if self.__algorithm is None \
1448 or isinstance(self.__algorithm, dict) \
1449 or not hasattr(self.__algorithm,"StoredVariables"):
1450 raise ValueError("No observer can be removed before choosing an algorithm.")
1451 if __V not in self.__algorithm:
1452 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1454 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# True when variable __V currently has a data observer; unlike the two
# methods above this one does not raise on a missing algorithm (the
# `return False` lines are on missing source lines).
1459 def hasObserver(self, __V):
1460 if self.__algorithm is None \
1461 or isinstance(self.__algorithm, dict) \
1462 or not hasattr(self.__algorithm,"StoredVariables"):
1464 if __V not in self.__algorithm:
1466 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# NOTE(review): the `def keys(self):` line (and docstring) of this method sits
# on a missing source line; body kept byte-identical.
# Returns all algorithm variable names plus parameter names, with the
# non-public bookkeeping names filtered out.
1469 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1470 for k in self.__variable_names_not_public:
1471 if k in __allvariables: __allvariables.remove(k)
1472 return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key is known when either the algorithm's variables or the
    # parameter dictionary holds it.
    if key in self.__algorithm:
        return True
    return key in self.__P
# NOTE(review): the `def __repr__(self):` / `def __str__(self):` lines sit on
# missing source lines; bodies kept byte-identical.
# Both render the algorithm name (__A) followed by the parameter dict (__P).
1479 "x.__repr__() <==> repr(x)"
1480 return repr(self.__A)+", "+repr(self.__P)
1483 "x.__str__() <==> str(x)"
1484 return str(self.__A)+", "+str(self.__P)
# NOTE(review): gapped listing — docstring delimiters, `if choice is None:`
# (orig ~1492), `module_path = None` initialisation, `raise ImportError(`
# lead-ins (orig 1505, 1518), and the `try:` line (orig 1510) are absent.
# Code kept byte-identical.
# Locates the algorithm module <choice>.py under a "daAlgorithms" directory
# on sys.path, imports it, and instantiates its ElementaryAlgorithm class.
1486 def __setAlgorithm(self, choice = None ):
1488 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1489 d'assimilation. L'argument est un champ caractère se rapportant au nom
1490 d'un algorithme réalisant l'opération sur les arguments fixes.
1493 raise ValueError("Error: algorithm choice has to be given")
# The algorithm choice is write-once for a given object.
1494 if self.__algorithmName is not None:
1495 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1496 daDirectory = "daAlgorithms"
1498 # Recherche explicitement le fichier complet
1499 # ------------------------------------------
1501 for directory in sys.path:
1502 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1503 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1504 if module_path is None:
1506 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1508 # Importe le fichier complet comme un module
1509 # ------------------------------------------
# sys.path is temporarily prepended with the algorithm directory, then
# restored after the import.
1511 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1512 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1513 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1514 raise ImportError("this module does not define a valid elementary algorithm.")
1515 self.__algorithmName = str(choice)
1516 sys.path = sys_path_tmp ; del sys_path_tmp
1517 except ImportError as e:
1519 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1521 # Instancie un objet du type élémentaire du fichier
1522 # -------------------------------------------------
1523 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1526 def __shape_validate(self):
1528 Validation de la correspondance correcte des tailles des variables et
1529 des matrices s'il y en a.
1531 if self.__Xb is None: __Xb_shape = (0,)
1532 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1533 elif hasattr(self.__Xb,"shape"):
1534 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1535 else: __Xb_shape = self.__Xb.shape()
1536 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1538 if self.__Y is None: __Y_shape = (0,)
1539 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1540 elif hasattr(self.__Y,"shape"):
1541 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1542 else: __Y_shape = self.__Y.shape()
1543 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1545 if self.__U is None: __U_shape = (0,)
1546 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1547 elif hasattr(self.__U,"shape"):
1548 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1549 else: __U_shape = self.__U.shape()
1550 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1552 if self.__B is None: __B_shape = (0,0)
1553 elif hasattr(self.__B,"shape"):
1554 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1555 else: __B_shape = self.__B.shape()
1556 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1558 if self.__R is None: __R_shape = (0,0)
1559 elif hasattr(self.__R,"shape"):
1560 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1561 else: __R_shape = self.__R.shape()
1562 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1564 if self.__Q is None: __Q_shape = (0,0)
1565 elif hasattr(self.__Q,"shape"):
1566 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1567 else: __Q_shape = self.__Q.shape()
1568 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1570 if len(self.__HO) == 0: __HO_shape = (0,0)
1571 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1572 elif hasattr(self.__HO["Direct"],"shape"):
1573 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1574 else: __HO_shape = self.__HO["Direct"].shape()
1575 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1577 if len(self.__EM) == 0: __EM_shape = (0,0)
1578 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1579 elif hasattr(self.__EM["Direct"],"shape"):
1580 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1581 else: __EM_shape = self.__EM["Direct"].shape()
1582 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1584 if len(self.__CM) == 0: __CM_shape = (0,0)
1585 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1586 elif hasattr(self.__CM["Direct"],"shape"):
1587 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1588 else: __CM_shape = self.__CM["Direct"].shape()
1589 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1591 # Vérification des conditions
1592 # ---------------------------
1593 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1594 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1595 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1596 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1598 if not( min(__B_shape) == max(__B_shape) ):
1599 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1600 if not( min(__R_shape) == max(__R_shape) ):
1601 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1602 if not( min(__Q_shape) == max(__Q_shape) ):
1603 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1604 if not( min(__EM_shape) == max(__EM_shape) ):
1605 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1607 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1609 "Shape characteristic of observation operator (H)"+\
1610 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1611 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1613 "Shape characteristic of observation operator (H)"+\
1614 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1615 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1617 "Shape characteristic of observation operator (H)"+\
1618 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1619 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1621 "Shape characteristic of observation operator (H)"+\
1622 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1624 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1625 if self.__algorithmName in ["EnsembleBlue",]:
1626 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1627 self.__Xb = Persistence.OneVector("Background")
1628 for member in asPersistentVector:
1629 self.__Xb.store( numpy.asarray(member, dtype=float) )
1630 __Xb_shape = min(__B_shape)
1633 "Shape characteristic of a priori errors covariance matrix (B)"+\
1634 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1636 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1638 "Shape characteristic of observation errors covariance matrix (R)"+\
1639 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1641 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1643 "Shape characteristic of evolution model (EM)"+\
1644 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1646 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1648 "Shape characteristic of control model (CM)"+\
1649 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1651 if ("Bounds" in self.__P) \
1652 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1653 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1654 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1655 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1657 if ("StateBoundsForQuantiles" in self.__P) \
1658 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1659 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1660 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1661 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1665 # ==============================================================================
1666 class RegulationAndParameters(object):
# General interface class holding an algorithm name and a parameter
# dictionary for a regulation case.
# NOTE(review): this listing is elided (original line numbers jump:
# 1667, 1669, 1671-1672, ... are absent), so only the visible
# statements are annotated.
1668 General action-interface class for the regulation and its parameters
1670 __slots__ = ("__name", "__P")
1673 name = "GenericRegulation",
1680 self.__name = str(name)
# An explicit algorithm argument takes precedence; otherwise it may be
# read from the given script under the variable name "Algorithm".
1683 if asAlgorithm is None and asScript is not None:
1684 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1686 __Algo = asAlgorithm
# Likewise, the parameter dict may come from the script ("Parameters").
1688 if asDict is None and asScript is not None:
1689 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1693 if __Dict is not None:
1694 self.__P.update( dict(__Dict) )
# The algorithm name is stored inside the same parameter dict.
1696 if __Algo is not None:
1697 self.__P.update( {"Algorithm":str(__Algo)} )
1699 def get(self, key = None):
1700 "Checks the existence of a variable or parameter key"
1702 return self.__P[key]
1706 # ==============================================================================
1707 class DataObserver(object):
# Generic observer-style interface: binds a callable observer to one or
# several named variables of an algorithm.
# NOTE(review): elided listing; only visible statements are annotated.
1709 General observer-type interface class
1711 __slots__ = ("__name", "__V", "__O", "__I")
1714 name = "GenericObserver",
1726 self.__name = str(name)
# Normalize the observed variable name(s) into the tuple self.__V and
# the matching per-variable info strings into self.__I.
1731 if onVariable is None:
1732 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1733 elif type(onVariable) in (tuple, list):
1734 self.__V = tuple(map( str, onVariable ))
1735 if withInfo is None:
1738 self.__I = (str(withInfo),)*len(self.__V)
1739 elif isinstance(onVariable, str):
1740 self.__V = (onVariable,)
1741 if withInfo is None:
# With no explicit info, the variable name itself serves as info.
1742 self.__I = (onVariable,)
1744 self.__I = (str(withInfo),)
1746 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable is either given directly, or built from a user
# text (template/string/script) through UserScript + Observer2Func.
1748 if asObsObject is not None:
1749 self.__O = asObsObject
1751 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1752 __Function = Observer2Func(__FunctionText)
1753 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1755 for k in range(len(self.__V)):
1758 if ename not in withAlgo:
1759 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1761 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1764 "x.__repr__() <==> repr(x)"
1765 return repr(self.__V)+"\n"+repr(self.__O)
1768 "x.__str__() <==> str(x)"
1769 return str(self.__V)+"\n"+str(self.__O)
1771 # ==============================================================================
1772 class UserScript(object):
# Generic holder of a user-script text, obtained from a direct string,
# a named template, or a script file; repr/str expose the text.
# NOTE(review): elided listing; only visible statements are annotated.
1774 General user-script-text interface class
1776 __slots__ = ("__name", "__F")
1779 name = "GenericUserScript",
1786 self.__name = str(name)
# Source selection priority: direct string, then known template
# (UserPostAnalysis or Observer families), then script file.
1788 if asString is not None:
1790 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1791 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1792 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1793 self.__F = Templates.ObserverTemplates[asTemplate]
1794 elif asScript is not None:
1795 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1800 "x.__repr__() <==> repr(x)"
1801 return repr(self.__F)
1804 "x.__str__() <==> str(x)"
1805 return str(self.__F)
1807 # ==============================================================================
1808 class ExternalParameters(object):
# Generic storage of external (user) parameters as a named dict with a
# small mapping-like API (get/keys/pop/items/__contains__).
# NOTE(review): elided listing; only visible statements are annotated.
1810 General interface class for the storage of external parameters
1812 __slots__ = ("__name", "__P")
1815 name = "GenericExternalParameters",
1821 self.__name = str(name)
1824 self.updateParameters( asDict, asScript )
1826 def updateParameters(self,
1830 "Updates the parameters"
# Parameters come either from an explicit dict or from a script file
# (variable "ExternalParameters").
1831 if asDict is None and asScript is not None:
1832 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1836 if __Dict is not None:
1837 self.__P.update( dict(__Dict) )
1839 def get(self, key = None):
1841 return self.__P[key]
1843 return list(self.__P.keys())
1846 return list(self.__P.keys())
1848 def pop(self, k, d):
1849 return self.__P.pop(k, d)
1852 return self.__P.items()
1854 def __contains__(self, key=None):
1855 "D.__contains__(k) -> True if D has a key k, else False"
1856 return key in self.__P
1858 # ==============================================================================
1859 class State(object):
# Generic state-vector interface: wraps either a single vector or a
# series of vectors (Persistence object), with shape/size metadata.
# NOTE(review): elided listing; only visible statements are annotated.
1861 General state-type interface class
1864 "__name", "__check", "__V", "__T", "__is_vector", "__is_series",
1869 name = "GenericVector",
1871 asPersistentVector = None,
1877 toBeChecked = False,
1880 Allows the definition of a vector:
1881 - asVector : data input, as a vector compatible with the
1882 numpy.matrix constructor, or "True" if input through a script.
1883 - asPersistentVector : data input, as a series of vectors
1884 compatible with the numpy.matrix constructor, or as an object of
1885 Persistence type, or "True" if input through a script.
1886 - asScript : if a valid script is given, containing a variable
1887 named "name", the variable is of type "asVector" (by default) or
1888 "asPersistentVector" depending on which of these variables is set
1890 - asDataFile : if one or more valid files are given,
1891 containing values in columns, themselves named "colNames"
1892 (if no column name is indicated, one looks for a column
1893 named "name"), the columns are retrieved and stored row after
1894 row (colMajor=False, by default) or column after column
1895 (colMajor=True). The resulting variable is of type "asVector" (by
1896 default) or "asPersistentVector" depending on which of these variables is
1899 self.__name = str(name)
1900 self.__check = bool(toBeChecked)
# Exactly one of these two flags is set by the input resolution below.
1904 self.__is_vector = False
1905 self.__is_series = False
# Input resolution: script, then data file, then direct arguments.
# NOTE(review): elided listing (e.g. original lines 1911, 1918, 1924,
# 1927, 1929, 1931 are absent, presumably holding the else branches);
# only visible statements are annotated.
1907 if asScript is not None:
1908 __Vector, __Series = None, None
1909 if asPersistentVector:
1910 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1912 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1913 elif asDataFile is not None:
1914 __Vector, __Series = None, None
1915 if asPersistentVector:
1916 if colNames is not None:
1917 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1919 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# NPZ files appear to be stored the other way round, hence the
# inverted transpose condition with respect to colMajor.
1920 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1921 __Series = numpy.transpose(__Series)
1922 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1923 __Series = numpy.transpose(__Series)
1925 if colNames is not None:
1926 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1928 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# colMajor selects Fortran-order vs C-order flattening of the data.
1930 __Vector = numpy.ravel(__Vector, order = "F")
1932 __Vector = numpy.ravel(__Vector, order = "C")
1934 __Vector, __Series = asVector, asPersistentVector
# Single vector case: store as a float column vector (n,1).
1936 if __Vector is not None:
1937 self.__is_vector = True
1938 if isinstance(__Vector, str):
1939 __Vector = PlatformInfo.strvect2liststr( __Vector )
1940 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1941 self.shape = self.__V.shape
1942 self.size = self.__V.size
# Series case: store as a Persistence.OneVector of float members.
1943 elif __Series is not None:
1944 self.__is_series = True
1945 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1946 self.__V = Persistence.OneVector(self.__name)
1947 if isinstance(__Series, str):
1948 __Series = PlatformInfo.strmatrix2liststr(__Series)
1949 for member in __Series:
1950 if isinstance(member, str):
1951 member = PlatformInfo.strvect2liststr( member )
1952 self.__V.store(numpy.asarray( member, dtype=float ))
# Persistence-like objects may expose shape as attribute or method.
1955 if isinstance(self.__V.shape, (tuple, list)):
1956 self.shape = self.__V.shape
1958 self.shape = self.__V.shape()
1959 if len(self.shape) == 1:
1960 self.shape = (self.shape[0],1)
1961 self.size = self.shape[0] * self.shape[1]
1964 "The %s object is improperly defined or undefined,"%self.__name+\
1965 " it requires at minima either a vector, a list/tuple of"+\
1966 " vectors or a persistent object. Please check your vector input.")
# Optional scheduler attached to the stored value (see getO).
1968 if scheduledBy is not None:
1969 self.__T = scheduledBy
# Accessors: getO returns the stored value (optionally with its
# scheduler); isvector/isseries expose the internal storage kind.
1971 def getO(self, withScheduler=False):
1973 return self.__V, self.__T
1974 elif self.__T is None:
1980 "Internal type check"
1981 return self.__is_vector
1984 "Internal type check"
1985 return self.__is_series
1988 "x.__repr__() <==> repr(x)"
1989 return repr(self.__V)
1992 "x.__str__() <==> str(x)"
1993 return str(self.__V)
1995 # ==============================================================================
1996 class Covariance(object):
# Generic covariance interface, storing the matrix in one of four
# internal forms: full matrix, diagonal vector, scalar*identity, or an
# arbitrary user object implementing the matrix protocol.
# NOTE(review): elided listing; only visible statements are annotated.
1998 General covariance-type interface class
2001 "__name", "__check", "__C", "__is_scalar", "__is_vector", "__is_matrix",
2002 "__is_object", "shape", "size",
2006 name = "GenericCovariance",
2007 asCovariance = None,
2008 asEyeByScalar = None,
2009 asEyeByVector = None,
2012 toBeChecked = False,
2015 Allows the definition of a covariance:
2016 - asCovariance : data input, as a matrix compatible with
2017 the numpy.matrix constructor
2018 - asEyeByScalar : data input as a single variance scalar,
2019 multiplying an identity correlation matrix, so that no matrix
2020 has to be explicitly given
2021 - asEyeByVector : data input as a single variance vector,
2022 to be put on the diagonal of a correlation matrix, so that no matrix
2023 has to be explicitly given
2024 - asCovObject : data input as a python object, which has the
2025 mandatory methods "getT", "getI", "diag", "trace", "__add__",
2026 "__sub__", "__neg__", "__mul__", "__rmul__" and the optional "shape",
2027 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
2028 - toBeChecked : boolean indicating whether the SPD character of the
2029 full matrix has to be checked
2031 self.__name = str(name)
2032 self.__check = bool(toBeChecked)
# Exactly one of these flags is set by the input dispatch below.
2035 self.__is_scalar = False
2036 self.__is_vector = False
2037 self.__is_matrix = False
2038 self.__is_object = False
# Values come either from a script variable or from direct arguments.
2040 if asScript is not None:
2041 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
2043 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2045 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2047 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2049 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
2051 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Storage dispatch: scalar, vector, full matrix, or user object.
# NOTE(review): elided listing; only visible statements are annotated.
2053 if __Scalar is not None:
2054 if isinstance(__Scalar, str):
2055 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
2056 if len(__Scalar) > 0: __Scalar = __Scalar[0]
2057 if numpy.array(__Scalar).size != 1:
2059 " The diagonal multiplier given to define a sparse matrix is"+\
2060 " not a unique scalar value.\n Its actual measured size is"+\
2061 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
2062 self.__is_scalar = True
# abs() makes the stored variance non-negative by construction.
2063 self.__C = numpy.abs( float(__Scalar) )
2066 elif __Vector is not None:
2067 if isinstance(__Vector, str):
2068 __Vector = PlatformInfo.strvect2liststr( __Vector )
2069 self.__is_vector = True
2070 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
# shape/size describe the implied full (n,n) diagonal matrix.
2071 self.shape = (self.__C.size,self.__C.size)
2072 self.size = self.__C.size**2
2073 elif __Matrix is not None:
2074 self.__is_matrix = True
2075 self.__C = numpy.matrix( __Matrix, float )
2076 self.shape = self.__C.shape
2077 self.size = self.__C.size
2078 elif __Object is not None:
2079 self.__is_object = True
# A user object must implement the full matrix operator protocol.
2081 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2082 if not hasattr(self.__C,at):
2083 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2084 if hasattr(self.__C,"shape"):
2085 self.shape = self.__C.shape
2088 if hasattr(self.__C,"size"):
2089 self.size = self.__C.size
2097 def __validate(self):
# Sanity checks on the stored covariance: defined, square, and
# positive-(semi)definite where verifiable.
2099 if self.__C is None:
2100 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2101 if self.ismatrix() and min(self.shape) != max(self.shape):
2102 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2103 if self.isobject() and min(self.shape) != max(self.shape):
2104 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2105 if self.isscalar() and self.__C <= 0:
2106 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2107 if self.isvector() and (self.__C <= 0).any():
2108 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check through Cholesky, run only when requested or at a
# verbose (debug) logging level, since it is costly.
2109 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2111 numpy.linalg.cholesky( self.__C )
2113 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2114 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2118 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal storage-kind predicates.
2121 "Internal type check"
2122 return self.__is_scalar
2125 "Internal type check"
2126 return self.__is_vector
2129 "Internal type check"
2130 return self.__is_matrix
2133 "Internal type check"
2134 return self.__is_object
# getI: inverse covariance as a new Covariance, per storage kind.
2139 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2140 elif self.isvector():
2141 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2142 elif self.isscalar():
2143 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2144 elif self.isobject() and hasattr(self.__C,"getI"):
2145 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2147 return None # Indispensable
# getT: transpose (identity for vector/scalar diagonal storage).
2152 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2153 elif self.isvector():
2154 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2155 elif self.isscalar():
2156 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2157 elif self.isobject() and hasattr(self.__C,"getT"):
2158 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2160 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2163 "Cholesky decomposition"
# For diagonal storage the Cholesky factor is the element-wise sqrt.
2165 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2166 elif self.isvector():
2167 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2168 elif self.isscalar():
2169 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2170 elif self.isobject() and hasattr(self.__C,"cholesky"):
2171 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2173 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2175 def choleskyI(self):
2176 "Inversion of the Cholesky decomposition"
2178 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2179 elif self.isvector():
2180 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2181 elif self.isscalar():
2182 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2183 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2184 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2186 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2189 "Matrix square root"
# numpy.real() discards spurious imaginary parts that scipy's sqrtm
# can introduce through floating-point error.
2192 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2193 elif self.isvector():
2194 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2195 elif self.isscalar():
2196 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2197 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2198 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2200 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2203 "Inversion of the matrix square root"
2206 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2207 elif self.isvector():
2208 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2209 elif self.isscalar():
2210 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2211 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2212 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2214 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2216 def diag(self, msize=None):
2217 "Diagonal of the matrix"
# For scalar storage the target size msize must be supplied, since
# the implied identity has no intrinsic dimension.
2219 return numpy.diag(self.__C)
2220 elif self.isvector():
2222 elif self.isscalar():
2224 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2226 return self.__C * numpy.ones(int(msize))
2227 elif self.isobject() and hasattr(self.__C,"diag"):
2228 return self.__C.diag()
2230 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2232 def trace(self, msize=None):
2233 "Trace of the matrix"
2235 return numpy.trace(self.__C)
2236 elif self.isvector():
2237 return float(numpy.sum(self.__C))
2238 elif self.isscalar():
2240 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
# Trace of scalar*identity(msize) is simply scalar*msize.
2242 return self.__C * int(msize)
2243 elif self.isobject():
2244 return self.__C.trace()
2246 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2248 def asfullmatrix(self, msize=None):
# Materialize the covariance as a dense float ndarray; msize is
# required only for scalar (implied identity) storage.
2251 return numpy.asarray(self.__C, dtype=float)
2252 elif self.isvector():
2253 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2254 elif self.isscalar():
2256 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2258 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2259 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2260 return self.__C.asfullmatrix()
2262 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
# Sparse accessor and standard textual representations.
2264 def assparsematrix(self):
2272 "x.__repr__() <==> repr(x)"
2273 return repr(self.__C)
2276 "x.__str__() <==> str(x)"
2277 return str(self.__C)
2279 def __add__(self, other):
2280 "x.__add__(y) <==> x+y"
2281 if self.ismatrix() or self.isobject():
2282 return self.__C + numpy.asmatrix(other)
2283 elif self.isvector() or self.isscalar():
# Diagonal/scalar storage: add self onto the diagonal of other,
# mutating the local array view and returning it as a matrix.
2284 _A = numpy.asarray(other)
2285 if len(_A.shape) == 1:
# NOTE(review): the [::2] stride on a (-1,1) reshape looks odd
# for a diagonal update of a 1-D operand — confirm against the
# complete (non-elided) source before relying on it.
2286 _A.reshape((-1,1))[::2] += self.__C
# Flat-view stride shape[1]+1 walks the main diagonal.
2288 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2289 return numpy.asmatrix(_A)
2291 def __radd__(self, other):
2292 "x.__radd__(y) <==> y+x"
2293 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2295 def __sub__(self, other):
2296 "x.__sub__(y) <==> x-y"
2297 if self.ismatrix() or self.isobject():
2298 return self.__C - numpy.asmatrix(other)
2299 elif self.isvector() or self.isscalar():
2300 _A = numpy.asarray(other)
# Replace other's diagonal with (self - diagonal of other).
2301 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2302 return numpy.asmatrix(_A)
2304 def __rsub__(self, other):
2305 "x.__rsub__(y) <==> y-x"
2306 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2309 "x.__neg__() <==> -x"
2312 def __matmul__(self, other):
2313 "x.__matmul__(y) <==> x@y"
2314 if self.ismatrix() and isinstance(other, (int, float)):
2315 return numpy.asarray(self.__C) * other
2316 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
# Vector operand: flat result; matrix operand: dense product.
2317 if numpy.ravel(other).size == self.shape[1]: # Vector
2318 return numpy.ravel(self.__C @ numpy.ravel(other))
2319 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2320 return numpy.asarray(self.__C) @ numpy.asarray(other)
2322 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2323 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
# Diagonal storage: matmul reduces to element-wise scaling.
2324 if numpy.ravel(other).size == self.shape[1]: # Vector
2325 return numpy.ravel(self.__C) * numpy.ravel(other)
2326 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2327 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2329 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2330 elif self.isscalar() and isinstance(other,numpy.matrix):
2331 return numpy.asarray(self.__C * other)
2332 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2333 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2334 return self.__C * numpy.ravel(other)
2336 return self.__C * numpy.asarray(other)
2337 elif self.isobject():
# Delegate to the user object's own matmul.
2338 return self.__C.__matmul__(other)
2340 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2342 def __mul__(self, other):
2343 "x.__mul__(y) <==> x*y"
# Mirrors __matmul__ but returns numpy.matrix results.
2344 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2345 return self.__C * other
2346 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2347 if numpy.ravel(other).size == self.shape[1]: # Vector
2348 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2349 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2350 return self.__C * numpy.asmatrix(other)
2353 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2354 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
# Diagonal storage: multiply element-wise, column result.
2355 if numpy.ravel(other).size == self.shape[1]: # Vector
2356 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2357 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2358 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2361 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2362 elif self.isscalar() and isinstance(other,numpy.matrix):
2363 return self.__C * other
2364 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2365 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2366 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2368 return self.__C * numpy.asmatrix(other)
2369 elif self.isobject():
2370 return self.__C.__mul__(other)
2372 raise NotImplementedError(
2373 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2375 def __rmatmul__(self, other):
2376 "x.__rmatmul__(y) <==> y@x"
# Reflected product: other is the left operand.
2377 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2378 return other * self.__C
2379 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2380 if numpy.ravel(other).size == self.shape[1]: # Vector
2381 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2382 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2383 return numpy.asmatrix(other) * self.__C
2386 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2387 elif self.isvector() and isinstance(other,numpy.matrix):
2388 if numpy.ravel(other).size == self.shape[0]: # Vector
2389 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2390 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2391 return numpy.asmatrix(numpy.array(other) * self.__C)
2394 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2395 elif self.isscalar() and isinstance(other,numpy.matrix):
2396 return other * self.__C
2397 elif self.isobject():
2398 return self.__C.__rmatmul__(other)
2400 raise NotImplementedError(
2401 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2403 def __rmul__(self, other):
2404 "x.__rmul__(y) <==> y*x"
# Reflected multiplication; mirrors __rmatmul__ with numpy.matrix
# results, plus an explicit scalar*float fast path.
2405 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2406 return other * self.__C
2407 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2408 if numpy.ravel(other).size == self.shape[1]: # Vector
2409 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2410 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2411 return numpy.asmatrix(other) * self.__C
2414 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2415 elif self.isvector() and isinstance(other,numpy.matrix):
2416 if numpy.ravel(other).size == self.shape[0]: # Vector
2417 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2418 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2419 return numpy.asmatrix(numpy.array(other) * self.__C)
2422 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2423 elif self.isscalar() and isinstance(other,numpy.matrix):
2424 return other * self.__C
2425 elif self.isscalar() and isinstance(other,float):
2426 return other * self.__C
2427 elif self.isobject():
2428 return self.__C.__rmul__(other)
2430 raise NotImplementedError(
2431 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# len(cov) is the number of rows of the (implied) full matrix.
2434 "x.__len__() <==> len(x)"
2435 return self.shape[0]
2437 # ==============================================================================
2438 class Observer2Func(object):
# Builds a callable observer function from its source text.
# NOTE(review): elided listing; only visible statements are annotated.
2440 Creation of an observer function from its text
# NOTE(review): __slots__ given as a bare string is valid for a single
# slot name, though ("__corps",) would be the conventional spelling.
2442 __slots__ = ("__corps")
2444 def __init__(self, corps=""):
2445 self.__corps = corps
2446 def func(self,var,info):
2447 "Observation function"
2450 "Returns the function pointer held by the object"
2453 # ==============================================================================
2454 class CaseLogger(object):
# Records the sequence of commands that created a case, and can dump
# or reload it through pluggable viewer/loader formatters.
# NOTE(review): elided listing; only visible statements are annotated.
2456 Preservation of the commands used to create a case
2459 "__name", "__objname", "__logSerie", "__switchoff", "__viewers",
2463 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2464 self.__name = str(__name)
2465 self.__objname = str(__objname)
2466 self.__logSerie = []
2467 self.__switchoff = False
# Built-in output formatters (format name -> viewer class).
2469 "TUI" :Interfaces._TUIViewer,
2470 "SCD" :Interfaces._SCDViewer,
2471 "YACS":Interfaces._YACSViewer,
2472 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2473 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2474 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formatters (format name -> loader class).
2477 "TUI" :Interfaces._TUIViewer,
2478 "COM" :Interfaces._COMViewer,
# Callers may extend/override the formatter tables.
2480 if __addViewers is not None:
2481 self.__viewers.update(dict(__addViewers))
2482 if __addLoaders is not None:
2483 self.__loaders.update(dict(__addLoaders))
2485 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2486 "Records an individual command"
2487 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
# "self" is an implementation detail, not a user argument.
2488 if "self" in __keys: __keys.remove("self")
2489 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2491 self.__switchoff = True
2493 self.__switchoff = False
2495 def dump(self, __filename=None, __format="TUI", __upa=""):
2496 "Normalized dump of the recorded commands"
2497 if __format in self.__viewers:
2498 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2500 raise ValueError("Dumping as \"%s\" is not available"%__format)
2501 return __formater.dump(__filename, __upa)
2503 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2504 "Normalized loading of the commands"
2505 if __format in self.__loaders:
2506 __formater = self.__loaders[__format]()
2508 raise ValueError("Loading as \"%s\" is not available"%__format)
2509 return __formater.load(__filename, __content, __object)
# ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction = lambda x: x,
        _mpEnabled = False,
        _mpWorkers = None ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the given function, evaluated either sequentially or through a
    multiprocessing pool.

    __xserie        : iterable of input points
    _extraArguments : None, a list/tuple (passed as *args) or a dict (passed
                      as **kwargs) of extra arguments for _sFunction
    _sFunction      : the function to evaluate on each point
    _mpEnabled      : if True, attempt multiprocessing evaluation
    _mpWorkers      : worker count (None or < 1 lets the pool decide)

    Raises TypeError when __xserie is not iterable or when _extraArguments
    has an unsupported type.
    """
    # NOTE(review): this block was reconstructed from a partially truncated
    # listing (the def header, the multiprocessing try/except and the return
    # were dropped); confirm against the reference implementation.
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))  # message kept verbatim (sic)
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            # Let the multiprocessing pool choose its worker count
            __mpWorkers = None
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            # multiprocessing unavailable: silently fall back to sequential
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    if __mpEnabled:
        # Parallel evaluation of all the points through a process pool
        _jobs = __xserie
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        # Sequential evaluation, dispatching on the kind of extra arguments
        # (the redundant "is not None and" guards of the original elif
        # branches are dropped: the first branch already handles None)
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))  # message kept verbatim (sic)
    #
    return __multiHX
# ==============================================================================
# Script entry point: running this module directly only prints the
# self-diagnostic banner; the module is otherwise meant to be imported.
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC\n")