1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Définit les outils généraux élémentaires.
"""
26 __author__ = "Jean-Philippe ARGAUD"
import copy
from functools import partial

import numpy

from daCore import Persistence, PlatformInfo, Interfaces
from daCore import Templates
40 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of computations.

    Each stored entry associates an input point ``x`` (for a named operator)
    with its previously computed value ``Hx``, so that an evaluation at a
    point already seen can be served from the cache instead of recomputed.
    """

    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lengthOfRedundancy    = -1 ):
        """
        The tolerance characteristics can be modified at creation.

        - toleranceInRedundancy : relative tolerance used to decide that two
          points are identical (compared to the stored point norm)
        - lengthOfRedundancy : maximal number of cached pairs; a negative
          value means "auto-size on first storage"
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lengthOR = int(lengthOfRedundancy)
        # Remember the initial length so that enable() can restore it
        self.__initlnOR = self.__lengthOR
        self.__enabled  = True
        self.clearCache()

    def clearCache(self):
        "Empty the cache of all stored computations"
        self.__listOPCV  = []  # list of (point, value, norm, operator name)
        self.__seenNames = []  # operator names already sized into the cache

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check the existence of a computation matching the given value"
        __alreadyCalculated = False
        __HxV = None
        if self.__enabled:
            # Scan from the most recent entry backwards
            for i in range(min(len(self.__listOPCV), self.__lengthOR) - 1, -1, -1):
                if not hasattr(xValue, 'size'):
                    pass  # not an array-like value: cannot compare
                elif (str(oName) != self.__listOPCV[i][3]):
                    pass  # different operator
                elif (xValue.size != self.__listOPCV[i][0].size):
                    pass  # different size
                elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
                    pass  # cheap first-component test to skip a full norm
                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                    # Redundant point found: reuse the stored value
                    __alreadyCalculated = True
                    __HxV = self.__listOPCV[i][1]
                    break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a computation Hx matching the value x"
        if self.__lengthOR < 0:
            # First storage: auto-size the cache from the point dimension
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames:  # Extend the list if new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Drop the oldest entries when the cache overflows
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),  # 0 Previous point
            copy.copy(HxValue),              # 1 Previous value
            numpy.linalg.norm(xValue),       # 2 Norm
            str(oName),                      # 3 Operator name
        ) )

    def disable(self):
        "Deactivate the cache (lookups always miss, nothing is kept)"
        self.__initlnOR = self.__lengthOR
        self.__lengthOR = 0
        self.__enabled  = False

    def enable(self):
        "Reactivate the cache with its previous length"
        self.__lengthOR = self.__initlnOR
        self.__enabled  = True
114 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator.

    The operator is defined either by a Python function (mono- or
    multi-valued) or by a matrix. Evaluations are counted per instance and
    class-wide, and redundant evaluations can be served from a shared cache.
    """

    # Class-wide call counters and shared evaluation cache
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    CM = CacheManager()

    def __init__(self,
                 name                 = "GenericOperator",
                 fromMethod           = None,
                 fromMatrix           = None,
                 avoidingRedundancy   = True,
                 reducingMemoryUse    = False,
                 inputAsMultiFunction = False,
                 enableMultiProcess   = False,
                 extraArguments       = None ):
        """
        Build an object of this type by providing, through one of the two
        keywords, either a Python function / multi-function, or a matrix.

        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suited to the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - reducingMemoryUse : boolean forcing (or not) computations using
          less memory
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM   = bool( reducingMemoryUse )
        self.__avoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod  # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function into the multi-function interface
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the computation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Activate the computation cache, if allowed for this operator"
        if self.__avoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        "Return the internal type of the operator definition"
        return self.__Type

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue. This method only applies the operator, each argument being
        a priori of the correct type.

        Arguments, as a series, are:
        - xValue : argument suited for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether arguments are mono- or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Precomputed values are given: just record them (and cache them)
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            _HxValue = []
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                if self.__avoidRC:
                    Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
        else:
            _HxValue = []
            _xserie  = []  # points needing an actual function evaluation
            _hindex  = []  # their positions in the result series
            for i, xv in enumerate(_xValue):
                if self.__avoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                    else:
                        # Deferred: evaluated below in a single MF call
                        self.__addOneMethodCall()
                        _xserie.append( xv )
                        _hindex.append( i )
                        _hv = None
                _HxValue.append( _hv )
            #
            # Evaluate all pending points in one multi-function call
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # Calcul MF
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
                if not hasattr(_hserie, "pop"):
                    raise TypeError(
                        "The user input multi-function doesn't seem to return a"+\
                        " result sequence, behaving like a mono-function. It has"+\
                        " to be checked." )
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    _HxValue[i] = _hv
                    if self.__avoidRC:
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to pairs
        (xValue, uValue). This method only applies the operator, its
        argument being a priori of the correct type. If uValue is None,
        the operator is assumed to apply to xValue only.

        - paires : arguments, as pairs, are:
            - xValue : argument X suited for applying the operator
            - uValue : argument U suited for applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            _HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
        else:
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # Calcul MF
            else:
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        only applies the operator, its argument being a priori of the
        correct type. If the operator is linear (a matrix), it is valid at
        any nominal point, so xNominal can be anything. There is a single
        pair by default, and argsAsSerie indicates a multi-pair argument.

        - paires : arguments, as pairs, are:
            - xNominal : series of arguments giving the point where the
              operator is built to be then applied
            - xValue : series of arguments suited for applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            _HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # Calcul MF
            else:
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator as a matrix; a functional operator requires an
        operating point at which the matrix form is evaluated.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( self.__Method(((_vfmf, None),)) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    def shape(self):
        """
        Return the numpy shape if the operator is available as a matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations, as a tuple
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or a single entry of it when ``which`` is given.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix evaluation"
        self.__NbCallsAsMatrix  += 1  # Local count
        Operator.NbCallsAsMatrix += 1  # Global count

    def __addOneMethodCall(self, nb = 1):
        "Count one (or nb) function evaluation(s)"
        self.__NbCallsAsMethod  += nb  # Local count
        Operator.NbCallsAsMethod += nb  # Global count

    def __addOneCacheCall(self):
        "Count one cache hit"
        self.__NbCallsOfCached  += 1  # Local count
        Operator.NbCallsOfCached += 1  # Global count
413 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent linear, Adjoint).
    """

    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None,   # 1 Function
                 asThreeFunctions = None,   # 3 Functions in a dictionary
                 asScript         = None,   # 1 or 3 Function(s) by script
                 asDict           = None,   # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 performancePrf   = None,
                 inputAsMF        = False,  # Function(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False ):
        "Build the Direct/Tangent/Adjoint triple from one of the possible definitions."
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        self.__FO = {}  # the three Operator objects, plus "AppliedInX"
        self.__T  = None
        #
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "withIncrement" in __Parameters:  # Temporary backward compatibility
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        #
        if asScript is not None:
            # Definitions are loaded from a user script
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError(
                        "The functions has to be given in a dictionnary which have either"+\
                        " 1 key (\"Direct\") or"+\
                        " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # Normalize the optional "applied in X" precomputed values
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Derivatives approximated by finite differences of the Direct operator
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                name                  = self.__name,
                Function              = __Function["Direct"],
                centeredDF            = __Function["CenteredFiniteDifference"],
                increment             = __Function["DifferentialIncrement"],
                dX                    = __Function["withdX"],
                extraArguments        = self.__extraArgs,
                reducingMemoryUse     = __Function["withReducingMemoryUse"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy    = __Function["withLengthOfRedundancy"],
                mpEnabled             = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers             = __Function["NumberOfProcesses"],
                mfEnabled             = __Function["withmfEnabled"],
            )
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # The three operators are given explicitly as functions
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator: the adjoint is the transpose
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError(
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a matrix, a Direct operator for"+\
                " approximate derivatives or a Tangent/Adjoint operators pair."+\
                " Please check your operator input.")
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of built operators"
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
649 # ==============================================================================
650 class Algorithm(object):
652 Classe générale d'interface de type algorithme
654 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
655 d'assimilation, en fournissant un container (dictionnaire) de variables
656 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
658 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
660 def __init__(self, name):
662 L'initialisation présente permet de fabriquer des variables de stockage
663 disponibles de manière générique dans les algorithmes élémentaires. Ces
664 variables de stockage sont ensuite conservées dans un dictionnaire
665 interne à l'objet, mais auquel on accède par la méthode "get".
667 Les variables prévues sont :
668 - APosterioriCorrelations : matrice de corrélations de la matrice A
669 - APosterioriCovariance : matrice de covariances a posteriori : A
670 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
671 - APosterioriVariances : vecteur des variances de la matrice A
672 - Analysis : vecteur d'analyse : Xa
673 - BMA : Background moins Analysis : Xa - Xb
674 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
675 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
676 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
677 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
678 - CostFunctionJo : partie observations de la fonction-coût : Jo
679 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
680 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
681 - CurrentOptimum : état optimal courant lors d'itérations
682 - CurrentState : état courant lors d'itérations
683 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
684 - EnsembleOfSimulations : ensemble d'états (sorties, simulations) rangés par colonne dans une matrice
685 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
686 - EnsembleOfStates : ensemble d'états (entrées, paramètres) rangés par colonne dans une matrice
687 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
688 - ForecastState : état prédit courant lors d'itérations
689 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
690 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
691 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
692 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
693 - Innovation : l'innovation : d = Y - H(X)
694 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
695 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
696 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
697 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
698 - KalmanGainAtOptimum : gain de Kalman à l'optimum
699 - MahalanobisConsistency : indicateur de consistance des covariances
700 - OMA : Observation moins Analyse : Y - Xa
701 - OMB : Observation moins Background : Y - Xb
702 - Residu : dans le cas des algorithmes de vérification
703 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
704 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
705 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
706 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
707 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
708 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
709 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
710 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
711 On peut rajouter des variables à stocker dans l'initialisation de
712 l'algorithme élémentaire qui va hériter de cette classe
714 logging.debug("%s Initialisation", str(name))
715 self._m = PlatformInfo.SystemUsage()
717 self._name = str( name )
718 self._parameters = {"StoreSupplementaryCalculations":[]}
719 self.__internal_state = {}
720 self.__required_parameters = {}
721 self.__required_inputs = {
722 "RequiredInputValues":{"mandatory":(), "optional":()},
723 "ClassificationTags":[],
725 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
726 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
727 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
728 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
730 self.StoredVariables = {}
731 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
732 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
733 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
734 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
735 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
736 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
737 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
738 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
739 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
740 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
741 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
742 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
743 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
744 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
745 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
746 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
747 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
748 self.StoredVariables["EnsembleOfSimulations"] = Persistence.OneMatrix(name = "EnsembleOfSimulations")
749 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
750 self.StoredVariables["EnsembleOfStates"] = Persistence.OneMatrix(name = "EnsembleOfStates")
751 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
752 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
753 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
754 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
755 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
756 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
757 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
758 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
759 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
760 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
761 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
762 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
763 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
764 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
765 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
766 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
767 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
768 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
769 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
770 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
771 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
772 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
773 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
774 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
775 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
776 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
777 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
778 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
779 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
781 for k in self.StoredVariables:
782 self.__canonical_stored_name[k.lower()] = k
784 for k, v in self.__variable_names_not_public.items():
785 self.__canonical_parameter_name[k.lower()] = k
786 self.__canonical_parameter_name["algorithm"] = "Algorithm"
787 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
    # Common pre-processing before any algorithmic run: logging/timing
    # setup, ingestion of Parameters, and validation of the provided
    # vectors (Xb, Y, U), covariances (B, R, Q) and operators (HO, EM, CM)
    # against the mandatory/optional input requirements.
    # NOTE(review): this listing is partially truncated — several
    # structural lines (docstring delimiters, 'if'/'else:' headers) are
    # missing from this view; verify against the full file.
    logging.debug("%s Lancement", self._name)
    logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
    self._getTimeState(reset=True)
    # Update the internal parameters with the content of Parameters,
    # falling back on the default values for all those left undefined
    self.__setParameters(Parameters, reset=True)
    for k, v in self.__variable_names_not_public.items():
        if k not in self._parameters: self.__setParameters( {k:v} )
    # Corrections and complements of the vectors
    def __test_vvalue(argument, variable, argname, symbol=None):
        # Check one vector input against the mandatory/optional lists
        if symbol is None: symbol = variable
        # NOTE(review): an 'if argument is None:' guard line appears to be
        # missing here in this listing.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
                    self._name,argname,symbol,numpy.array(argument).size))
    __test_vvalue( Xb, "Xb", "Background or initial state" )
    __test_vvalue( Y, "Y", "Observation" )
    __test_vvalue( U, "U", "Control" )
    # Corrections and complements of the covariances
    def __test_cvalue(argument, variable, argname, symbol=None):
        # Check one error covariance matrix input the same way
        if symbol is None: symbol = variable
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
    __test_cvalue( B, "B", "Background" )
    __test_cvalue( R, "R", "Observation" )
    __test_cvalue( Q, "Q", "Evolution" )
    # Corrections and complements of the operators
    def __test_ovalue(argument, variable, argname, symbol=None):
        # Check one operator input (given as a non-empty dict) the same way
        if symbol is None: symbol = variable
        if argument is None or (isinstance(argument,dict) and len(argument)==0):
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
    __test_ovalue( HO, "HO", "Observation", "H" )
    __test_ovalue( EM, "EM", "Evolution", "M" )
    __test_ovalue( CM, "CM", "Control Model", "C" )
    # Corrections and complements of the bounds
    if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
        logging.debug("%s Bounds taken into account"%(self._name,))
        self._parameters["Bounds"] = None
    if ("StateBoundsForQuantiles" in self._parameters) \
        and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
        and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
        logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
        # Caution: unlike Bounds, no default to None, otherwise one can not be without bounds
    # Corrections and complements of the initialization in X
    if "InitializationPoint" in self._parameters:
        if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
            if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                    %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
            # Obtained by typecast: numpy.ravel(self._parameters["InitializationPoint"])
            self._parameters["InitializationPoint"] = numpy.ravel(Xb)
        if self._parameters["InitializationPoint"] is None:
            raise ValueError("Forced initial point can not be set without any given Background or required value")
    # Correction to work around a TNC bug on the Minimum return
    if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
        self.setParameterValue("StoreInternalVariables",True)
    # Verbosity and logging
    if logging.getLogger().level < logging.WARNING:
        self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
        self._parameters["optmessages"] = 15
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        self._parameters["optmessages"] = 0
def _post_run(self,_oH=None):
    # Common post-processing: derive a posteriori variances, standard
    # deviations and correlations from each stored APosterioriCovariance
    # when requested, then log operator call counts, memory and timings.
    if ("StoreSupplementaryCalculations" in self._parameters) and \
        "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
        for _A in self.StoredVariables["APosterioriCovariance"]:
            if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
            if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
            if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                # Normalize the covariance by the standard deviations to obtain correlations
                _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                self.StoredVariables["APosterioriCorrelations"].store( _C )
    if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
        # NOTE(review): the 'logging.debug(' call openers for the two
        # argument groups below appear to be missing from this listing.
            "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
            self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
    logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
    logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
    logging.debug("%s Terminé", self._name)
933 def _toStore(self, key):
934 "True if in StoreSupplementaryCalculations, else False"
935 return key in self._parameters["StoreSupplementaryCalculations"]
def get(self, key=None):
    # Return one stored variable identified by its case-insensitive key,
    # or the whole dictionary of stored variables when no key is given.
    # The persistence objects themselves are returned, so element access
    # uses the persistence classes' own methods.
    # NOTE(review): docstring delimiters and an 'if key is not None:'
    # guard line appear to be missing from this listing.
    Renvoie l'une des variables stockées identifiée par la clé, ou le
    dictionnaire de l'ensemble des variables disponibles en l'absence de
    clé. Ce sont directement les variables sous forme objet qui sont
    renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
    des classes de persistance.
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    return self.StoredVariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # Keys are compared case-insensitively through the canonical-name map.
    if key is None or key.lower() not in self.__canonical_stored_name:
        # NOTE(review): the 'return False' branch and the following 'else:'
        # appear to be missing from this listing.
    return self.__canonical_stored_name[key.lower()] in self.StoredVariables
    # NOTE(review): the 'def keys(self):' header of this method appears to
    # be missing from this listing.
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    # NOTE(review): the 'def pop(self, k, d):' header and the fallback
    # branch lines of this method appear to be missing from this listing.
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None, Parameters=None):
    # Abstract entry point: concrete algorithms must override this method
    # with the elementary algorithmic calculation.
    Doit implémenter l'opération élémentaire de calcul algorithmique.
    raise NotImplementedError("Mathematical algorithmic calculation has not been implemented!")
def defineRequiredParameter(self,
    # NOTE(review): the keyword-argument lines of this signature (name,
    # default, typecast, minval, maxval, listval, listadv, message,
    # oldname, ...) appear to be missing from this listing, as well as the
    # 'if name is None:' guard before the raise below.
    # Registers a required parameter of the algorithm together with its
    # default characteristics, and maintains the canonical-name and
    # deprecated-name maps used for case-insensitive lookups.
    Permet de définir dans l'algorithme des paramètres requis et leurs
    caractéristiques par défaut.
        raise ValueError("A name is mandatory to define a required parameter.")
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "listval"  : listval,
        "listadv"  : listadv,
        "message"  : message,
        "oldname"  : oldname,
    self.__canonical_parameter_name[name.lower()] = name
    if oldname is not None:
        self.__canonical_parameter_name[oldname.lower()] = name # Conversion
        self.__replace_by_the_new_name[oldname.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    # Return the sorted list of required parameter names, or the full
    # dictionary of required parameters (when noDetails is False).
    # NOTE(review): the 'if noDetails:' / 'else:' lines appear to be
    # missing from this listing.
    Renvoie la liste des noms de paramètres requis ou directement le
    dictionnaire des paramètres requis.
        return sorted(self.__required_parameters.keys())
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    # Return the value of a required parameter after controlled
    # validation: optional typecast, min/max bound checks, and membership
    # checks against the allowed (listval) and advanced (listadv) lists.
    # NOTE(review): several structural lines ('else:', 'try:'/'except',
    # the 'for v in __val:' loop header and the final 'return') appear to
    # be missing from this listing.
    Renvoie la valeur d'un paramètre requis de manière contrôlée
    __k = self.__canonical_parameter_name[name.lower()]
    default  = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval   = self.__required_parameters[__k]["minval"]
    maxval   = self.__required_parameters[__k]["maxval"]
    listval  = self.__required_parameters[__k]["listval"]
    listadv  = self.__required_parameters[__k]["listadv"]
    if value is None and default is None:
    elif value is None and default is not None:
        if typecast is None: __val = default
        else: __val = typecast( default )
        if typecast is None: __val = value
            __val = typecast( value )
            raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None or listadv is not None:
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    # Declare which calculation input arguments are mandatory and which
    # are optional for this algorithm.
    Permet d'imposer des arguments de calcul requis en entrée.
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
def getInputArguments(self):
    # Return the (mandatory, optional) tuples of required input argument names.
    Permet d'obtenir les listes des arguments de calcul requis en entrée.
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    # Append classification tags to the algorithm attributes and return
    # the current full list in all cases.
    Permet d'adjoindre des attributs comme les tags de classification.
    Renvoie la liste actuelle dans tous les cas.
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
def __setParameters(self, fromDico={}, reset=False):
    # Store the received parameters in the internal dictionary, resolving
    # case-insensitive names, warning on deprecated (old) names, and
    # validating each required parameter through setParameterValue().
    # NOTE(review): some lines (reset handling, 'else:' branches, the loop
    # header before the length-based logging) appear to be missing from
    # this listing.
    Permet de stocker les paramètres reçus dans le dictionnaire interne.
    self._parameters.update( fromDico )
    __inverse_fromDico_keys = {}
    for k in fromDico.keys():
        if k.lower() in self.__canonical_parameter_name:
            __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
    #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
    __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
    # Emit a FutureWarning for every deprecated parameter name in use
    for k in __inverse_fromDico_keys.values():
        if k.lower() in self.__replace_by_the_new_name:
            __newk = self.__replace_by_the_new_name[k.lower()]
            __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
            __msg += " Please update your code."
            warnings.warn(__msg, FutureWarning, stacklevel=50)
    # Validate every required parameter, from fromDico or from defaults
    for k in self.__required_parameters.keys():
        if k in __canonic_fromDico_keys:
            self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
            self._parameters[k] = self.setParameterValue(k)
        if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
            logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
    # Store named variables constituting the internal state, either one
    # (key, value) pair and/or a whole dictionary; reset empties it first.
    Permet de stocker des variables nommées constituant l'état interne
    if reset: # Empty the dictionary beforehand
        self.__internal_state = {}
    if key is not None and value is not None:
        self.__internal_state[key] = value
    self.__internal_state.update( dict(fromDico) )
def _getInternalState(self, key=None):
    # Return the internal state as a dictionary of named variables, or a
    # single entry when a known key is given.
    Restitue un état interne sous la forme d'un dictionnaire de variables nommées
    if key is not None and key in self.__internal_state:
        return self.__internal_state[key]
    return self.__internal_state
def _getTimeState(self, reset=False):
    # Initialize (reset=True) or return the (cpu, elapsed) times, in
    # seconds, accumulated since the last reset.
    # NOTE(review): the 'if reset:' / 'else:' lines appear to be missing
    # from this listing.
    Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
        self.__initial_cpu_time = time.process_time()
        self.__initial_elapsed_time = time.perf_counter()
        self.__cpu_time = time.process_time() - self.__initial_cpu_time
        self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
        return self.__cpu_time, self.__elapsed_time
def _StopOnTimeLimit(self, X=None, withReason=False):
    "Stop criteria on time limit: True/False [+ Reason]"
    # Compare the current cpu/elapsed times to the MaximumCpuTime and
    # MaximumElapsedTime parameters when they are set.
    c, e = self._getTimeState()
    if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
        __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
    elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
        __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
        __SC, __SR = False, ""
    # NOTE(review): the final 'else:' branch header and the return
    # statement(s) of this method are missing from this listing.
1168 # ==============================================================================
class PartialAlgorithm(object):
    # Storage-only stand-in for "Algorithm": same storage interface, but
    # without any advanced action such as checking. For the methods
    # reproduced here, the behavior is identical to the "Algorithm" ones.
    Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
    action avancée comme la vérification . Pour les méthodes reprises ici,
    le fonctionnement est identique à celles de la classe "Algorithm".
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        # Minimal set of persistent stored variables
        self.StoredVariables = {}
        self.StoredVariables["Analysis"]                             = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"]                        = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"]                       = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"]                       = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"]               = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"]                    = Persistence.OneIndex(name = "CurrentStepNumber")
        # Case-insensitive lookup table: lowercased name -> canonical name
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        # Return one stored variable by case-insensitive key, or the whole
        # dictionary of stored variables when no key is given.
        # NOTE(review): the 'if key is not None:' guard line appears to be
        # missing from this listing.
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        return self.StoredVariables
1208 # ==============================================================================
class AlgorithmAndParameters(object):
    # General interface class driving an algorithm and its parameters.
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
    # NOTE(review): the 'def __init__(self,' header and most of its
    # keyword-argument lines (asDict, asScript, asAlgorithm, ...) are
    # missing from this listing.
        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # Ingest the parameters first, then resolve the algorithm choice
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
def updateParameters(self,
    # NOTE(review): the keyword-argument lines of this signature and the
    # 'else:' branch assigning __Dict appear to be missing from this
    # listing.
    "Update of the parameters"
    # Parameters can come either from a dictionary or from a script file.
    if asDict is None and asScript is not None:
        __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
def executePythonScheme(self, asDictAO = None):
    "Allows to launch the assimilation calculation"
    # Clear the operator cache, unpack the case objects from the given
    # dictionary (unwrapping with getO() when available), validate the
    # shapes, then run the selected algorithm with the full input set.
    Operator.CM.clearCache()
    if not isinstance(asDictAO, dict):
        raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
    if hasattr(asDictAO["Background"],"getO"):          self.__Xb = asDictAO["Background"].getO()
    elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
    else:                                               self.__Xb = None
    if hasattr(asDictAO["Observation"],"getO"):         self.__Y = asDictAO["Observation"].getO()
    else:                                               self.__Y = asDictAO["Observation"]
    if hasattr(asDictAO["ControlInput"],"getO"):        self.__U = asDictAO["ControlInput"].getO()
    else:                                               self.__U = asDictAO["ControlInput"]
    if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
    else:                                               self.__HO = asDictAO["ObservationOperator"]
    if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
    else:                                               self.__EM = asDictAO["EvolutionModel"]
    if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
    else:                                               self.__CM = asDictAO["ControlModel"]
    self.__B = asDictAO["BackgroundError"]
    self.__R = asDictAO["ObservationError"]
    self.__Q = asDictAO["EvolutionError"]
    #
    self.__shape_validate()
    #
    # NOTE(review): the keyword-argument lines of this run() call (Xb, Y,
    # U, HO, EM, CM, R, B, Q) appear to be missing from this listing.
    self.__algorithm.run(
        Parameters = self.__P,
def executeYACSScheme(self, FileName=None):
    "Allows to launch the assimilation calculation"
    # Load and execute a YACS XML scheme, requiring a full SALOME/YACS/
    # ADAO environment to be available.
    # NOTE(review): several lines ('else:', module imports such as the
    # YACS 'pilot'/'loader', 'try:' headers and exit branches) appear to
    # be missing from this listing.
    if FileName is None or not os.path.exists(FileName):
        raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
    if not PlatformInfo.has_salome or \
        not PlatformInfo.has_yacs or \
        not PlatformInfo.has_adao:
        raise ImportError("\n\n"+\
            "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
            "Please load the right environnement before trying to use it.\n")
    #
    import SALOMERuntime
    SALOMERuntime.RuntimeSALOME_setRuntime()
    r = pilot.getRuntime()
    xmlLoader = loader.YACSLoader()
    xmlLoader.registerProcCataLoader()
    # Register the scheme file itself as a catalog before loading it
    catalogAd = r.loadCatalog("proc", __file)
    r.addCatalog(catalogAd)
        p = xmlLoader.load(__file)
    except IOError as ex:
        print("The YACS XML schema file can not be loaded: %s"%(ex,))
    logger = p.getLogger("parser")
    if not logger.isEmpty():
        print("The imported YACS XML schema has errors on parsing:")
        print(logger.getStr())
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
    info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
    p.checkConsistency(info)
    if info.areWarningsOrErrors():
        print("The YACS XML schema is not coherent and will not be executed:")
        print(info.getGlobalRepr())
    e = pilot.ExecutorSwig()
    if p.getEffectiveState() != pilot.DONE:
        print(p.getErrorReport())
def get(self, key = None):
    "Checks the existence of a variable or parameter key"
    # Return a stored algorithm variable, a parameter value, or (when no
    # key matches) the parameters without the non-public variables.
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
        # NOTE(review): 'else:' and the final 'return allvariables' appear
        # to be missing from this listing. Also note that 'allvariables'
        # aliases self.__P, so the pop() below seems to mutate the
        # internal parameter dictionary — verify against the full file.
        allvariables = self.__P
        for k in self.__variable_names_not_public: allvariables.pop(k, None)
def pop(self, k, d):
    """Delegate key removal to the wrapped algorithm (needed for pickling)."""
    wrapped = self.__algorithm
    return wrapped.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Return the list of parameters required by the chosen algorithm."""
    algo = self.__algorithm
    return algo.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    """Return the calculation input arguments required by the algorithm."""
    algo = self.__algorithm
    return algo.getInputArguments()
def getAlgorithmAttributes(self):
    """Return the attribute list of the algorithm (setAttributes with no
    tags returns the current list unchanged)."""
    algo = self.__algorithm
    return algo.setAttributes()
def setObserver(self, __V, __O, __I, __S):
    # Attach a data observer to the stored variable named __V of the
    # current algorithm; requires that an algorithm was already chosen.
    # NOTE(review): some keyword-argument lines of the setDataObserver
    # call appear to be missing from this listing.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be build before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
    self.__algorithm.StoredVariables[ __V ].setDataObserver(
        HookParameters = __I,
def removeObserver(self, __V, __O, __A = False):
    # Detach a data observer from the stored variable named __V of the
    # current algorithm; requires that an algorithm was already chosen.
    # NOTE(review): the keyword-argument lines of the removeDataObserver
    # call appear to be missing from this listing.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be removed before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
    return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
def hasObserver(self, __V):
    # Tell whether the stored variable named __V currently has a data
    # observer attached.
    # NOTE(review): the 'return False' lines of the two guard branches
    # appear to be missing from this listing.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
    if __V not in self.__algorithm:
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
    # NOTE(review): the 'def keys(self):' header of this method appears to
    # be missing from this listing. Returns the combined algorithm and
    # parameter keys, without the non-public variable names.
    __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
    for k in self.__variable_names_not_public:
        if k in __allvariables: __allvariables.remove(k)
    return __allvariables
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    if key in self.__algorithm:
        return True
    return key in self.__P
    # NOTE(review): the 'def __repr__(self):' header appears to be missing
    # from this listing.
    "x.__repr__() <==> repr(x)"
    return repr(self.__A)+", "+repr(self.__P)
    # NOTE(review): the 'def __str__(self):' header appears to be missing
    # from this listing.
    "x.__str__() <==> str(x)"
    return str(self.__A)+", "+str(self.__P)
def __setAlgorithm(self, choice = None ):
    # Select the algorithm used to carry out the assimilation study. The
    # argument is a character string naming an algorithm module (located
    # in a "daAlgorithms" directory on sys.path) that performs the
    # operation on the fixed arguments.
    # NOTE(review): several lines ('if choice is None:', the
    # 'module_path = None' initialization, 'try:' headers and the
    # 'raise ImportError(' openers) appear to be missing from this
    # listing.
    Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
    d'assimilation. L'argument est un champ caractère se rapportant au nom
    d'un algorithme réalisant l'opération sur les arguments fixes.
        raise ValueError("Error: algorithm choice has to be given")
    if self.__algorithmName is not None:
        raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
    daDirectory = "daAlgorithms"
    #
    # Explicitly search for the complete file
    # ------------------------------------------
    for directory in sys.path:
        if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
            module_path = os.path.abspath(os.path.join(directory, daDirectory))
    if module_path is None:
            "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
    #
    # Import the complete file as a module
    # ------------------------------------------
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
    except ImportError as e:
            "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
    #
    # Instantiate an object of the file's elementary type
    # -------------------------------------------------
    self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1472 def __shape_validate(self):
1474 Validation de la correspondance correcte des tailles des variables et
1475 des matrices s'il y en a.
1477 if self.__Xb is None: __Xb_shape = (0,)
1478 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1479 elif hasattr(self.__Xb,"shape"):
1480 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1481 else: __Xb_shape = self.__Xb.shape()
1482 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1484 if self.__Y is None: __Y_shape = (0,)
1485 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1486 elif hasattr(self.__Y,"shape"):
1487 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1488 else: __Y_shape = self.__Y.shape()
1489 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1491 if self.__U is None: __U_shape = (0,)
1492 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1493 elif hasattr(self.__U,"shape"):
1494 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1495 else: __U_shape = self.__U.shape()
1496 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1498 if self.__B is None: __B_shape = (0,0)
1499 elif hasattr(self.__B,"shape"):
1500 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1501 else: __B_shape = self.__B.shape()
1502 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1504 if self.__R is None: __R_shape = (0,0)
1505 elif hasattr(self.__R,"shape"):
1506 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1507 else: __R_shape = self.__R.shape()
1508 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1510 if self.__Q is None: __Q_shape = (0,0)
1511 elif hasattr(self.__Q,"shape"):
1512 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1513 else: __Q_shape = self.__Q.shape()
1514 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1516 if len(self.__HO) == 0: __HO_shape = (0,0)
1517 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1518 elif hasattr(self.__HO["Direct"],"shape"):
1519 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1520 else: __HO_shape = self.__HO["Direct"].shape()
1521 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1523 if len(self.__EM) == 0: __EM_shape = (0,0)
1524 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1525 elif hasattr(self.__EM["Direct"],"shape"):
1526 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1527 else: __EM_shape = self.__EM["Direct"].shape()
1528 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1530 if len(self.__CM) == 0: __CM_shape = (0,0)
1531 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1532 elif hasattr(self.__CM["Direct"],"shape"):
1533 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1534 else: __CM_shape = self.__CM["Direct"].shape()
1535 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1537 # Vérification des conditions
1538 # ---------------------------
1539 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1540 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1541 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1542 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1544 if not( min(__B_shape) == max(__B_shape) ):
1545 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1546 if not( min(__R_shape) == max(__R_shape) ):
1547 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1548 if not( min(__Q_shape) == max(__Q_shape) ):
1549 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1550 if not( min(__EM_shape) == max(__EM_shape) ):
1551 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1553 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1555 "Shape characteristic of observation operator (H)"+\
1556 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1557 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1559 "Shape characteristic of observation operator (H)"+\
1560 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1561 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1563 "Shape characteristic of observation operator (H)"+\
1564 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1565 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1567 "Shape characteristic of observation operator (H)"+\
1568 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1570 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1571 if self.__algorithmName in ["EnsembleBlue",]:
1572 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1573 self.__Xb = Persistence.OneVector("Background")
1574 for member in asPersistentVector:
1575 self.__Xb.store( numpy.asarray(member, dtype=float) )
1576 __Xb_shape = min(__B_shape)
1579 "Shape characteristic of a priori errors covariance matrix (B)"+\
1580 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1582 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1584 "Shape characteristic of observation errors covariance matrix (R)"+\
1585 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1587 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1589 "Shape characteristic of evolution model (EM)"+\
1590 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1592 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1594 "Shape characteristic of control model (CM)"+\
1595 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1597 if ("Bounds" in self.__P) \
1598 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1599 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1600 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1601 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1603 if ("StateBoundsForQuantiles" in self.__P) \
1604 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1605 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1606 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1607 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1611 # ==============================================================================
# NOTE(review): numbered listing with gaps — some original lines (e.g. the
# "def __init__(self," header and "else:" branches) are not shown here.
1612 class RegulationAndParameters(object):
# General action interface class for regulation and its parameters.
# Stores an algorithm name and a parameter dict, optionally loaded from a script.
1614 General action interface class for regulation and its parameters
1617 name = "GenericRegulation",
1624 self.__name = str(name)
# Resolve the algorithm: from the script if none was given directly.
1627 if asAlgorithm is None and asScript is not None:
1628 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1630 __Algo = asAlgorithm
# Resolve the parameter dict the same way (script fallback).
1632 if asDict is None and asScript is not None:
1633 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge whatever was found into the internal parameter store self.__P.
1637 if __Dict is not None:
1638 self.__P.update( dict(__Dict) )
1640 if __Algo is not None:
1641 self.__P.update( {"Algorithm":str(__Algo)} )
1643 def get(self, key = None):
# Returns the value stored for "key"; a fallback branch (not visible in this
# excerpt — presumably returning the whole dict) handles other cases. TODO confirm.
1644 "Check the existence of a variable or parameter key"
1646 return self.__P[key]
1650 # ==============================================================================
# NOTE(review): numbered listing with gaps — "else:" branches and the
# "__repr__"/"__str__" method headers fall in the missing lines.
1651 class DataObserver(object):
# General observer-type interface class: attaches a callable observer to one
# or several named algorithm variables via withAlgo.setObserver(...).
1653 General observer-type interface class
1656 name = "GenericObserver",
1668 self.__name = str(name)
# Normalize the observed variable name(s) into the tuple self.__V and the
# matching info strings self.__I (one info entry per variable).
1673 if onVariable is None:
1674 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1675 elif type(onVariable) in (tuple, list):
1676 self.__V = tuple(map( str, onVariable ))
1677 if withInfo is None:
# Same info string is replicated for every observed variable.
1680 self.__I = (str(withInfo),)*len(self.__V)
1681 elif isinstance(onVariable, str):
1682 self.__V = (onVariable,)
1683 if withInfo is None:
1684 self.__I = (onVariable,)
1686 self.__I = (str(withInfo),)
1688 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable: either given directly, or built from a user script /
# template / string through UserScript + Observer2Func.
1690 if asObsObject is not None:
1691 self.__O = asObsObject
1693 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1694 __Function = Observer2Func(__FunctionText)
1695 self.__O = __Function.getfunc()
# Register the observer on each requested variable of the algorithm object.
1697 for k in range(len(self.__V)):
1700 if ename not in withAlgo:
1701 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1703 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1706 "x.__repr__() <==> repr(x)"
1707 return repr(self.__V)+"\n"+repr(self.__O)
1710 "x.__str__() <==> str(x)"
1711 return str(self.__V)+"\n"+str(self.__O)
1713 # ==============================================================================
# NOTE(review): numbered listing with gaps — the "def __init__" header, the
# direct "asString" assignment and final fallback branches are not visible.
1714 class UserScript(object):
# General interface class holding user-supplied script text (self.__F),
# resolved in priority order: explicit string, named template, script file.
1716 General user-script text interface class
1719 name = "GenericUserScript",
1726 self.__name = str(name)
1728 if asString is not None:
# Named templates are looked up per role ("UserPostAnalysis", "Observer").
1730 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1731 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1732 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1733 self.__F = Templates.ObserverTemplates[asTemplate]
1734 elif asScript is not None:
1735 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1740 "x.__repr__() <==> repr(x)"
1741 return repr(self.__F)
1744 "x.__str__() <==> str(x)"
1745 return str(self.__F)
1747 # ==============================================================================
# NOTE(review): numbered listing with gaps — method headers for the keys/items
# accessors are among the missing lines.
1748 class ExternalParameters(object):
# General interface class for storing external parameters: a thin, dict-like
# wrapper around the internal store self.__P.
1750 General interface class for external parameter storage
1753 name = "GenericExternalParameters",
1759 self.__name = str(name)
1762 self.updateParameters( asDict, asScript )
1764 def updateParameters(self,
1768 "Parameter update"
# Load from script when no dict is given directly, then merge into self.__P.
1769 if asDict is None and asScript is not None:
1770 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1774 if __Dict is not None:
1775 self.__P.update( dict(__Dict) )
1777 def get(self, key = None):
1779 return self.__P[key]
# Two key-listing accessors (headers not visible in this excerpt) both return
# the key list of the internal store.
1781 return list(self.__P.keys())
1784 return list(self.__P.keys())
1786 def pop(self, k, d):
# Delegates to dict.pop with default, mirroring the mapping protocol.
1787 return self.__P.pop(k, d)
1790 return self.__P.items()
1792 def __contains__(self, key=None):
1793 "D.__contains__(k) -> True if D has a key k, else False"
1794 return key in self.__P
1796 # ==============================================================================
# NOTE(review): numbered listing with gaps — several "else:" branches, the
# "raise ValueError(" opener before orig. 1897 and some method headers are
# among the missing lines; do not treat this excerpt as complete.
1797 class State(object):
# General state-type interface class: wraps either a single vector
# (self.__is_vector) or a series of vectors (self.__is_series, Persistence).
1799 General state-type interface class
1802 name = "GenericVector",
1804 asPersistentVector = None,
1810 toBeChecked = False,
# (Translated constructor docstring.)
1813 Allows one to define a vector:
1814 - asVector : data input, as a vector compatible with the
1815 numpy.matrix constructor, or "True" if input comes from a script.
1816 - asPersistentVector : data input, as a series of vectors
1817 compatible with the numpy.matrix constructor, or as an object of
1818 Persistence type, or "True" if input comes from a script.
1819 - asScript : if a valid script is given containing a variable
1820 named "name", the variable is of type "asVector" (default) or
1821 "asPersistentVector" depending on which of these variables is set
1823 - asDataFile : if one or more valid files are given
1824 containing values in columns, themselves named "colNames"
1825 (if no column name is indicated, a column named "name" is looked
1826 for), the columns are retrieved and stored row after
1827 row (colMajor=False, by default) or column after column
1828 (colMajor=True). The resulting variable is of type "asVector" (by
1829 default) or "asPersistentVector" depending on which of these
1832 self.__name = str(name)
1833 self.__check = bool(toBeChecked)
1837 self.__is_vector = False
1838 self.__is_series = False
# Input resolution: script source takes priority, then data file, then the
# direct asVector/asPersistentVector arguments.
1840 if asScript is not None:
1841 __Vector, __Series = None, None
1842 if asPersistentVector:
1843 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1845 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1846 elif asDataFile is not None:
1847 __Vector, __Series = None, None
1848 if asPersistentVector:
1849 if colNames is not None:
1850 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1852 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, hence the asymmetric transpose logic.
# NOTE(review): presumed rationale — confirm against Interfaces.ImportFromFile.
1853 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1854 __Series = numpy.transpose(__Series)
1855 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1856 __Series = numpy.transpose(__Series)
1858 if colNames is not None:
1859 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1861 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flattening order depends on a condition not visible here (likely colMajor).
1863 __Vector = numpy.ravel(__Vector, order = "F")
1865 __Vector = numpy.ravel(__Vector, order = "C")
1867 __Vector, __Series = asVector, asPersistentVector
# Store as a single column vector...
1869 if __Vector is not None:
1870 self.__is_vector = True
1871 if isinstance(__Vector, str):
1872 __Vector = PlatformInfo.strvect2liststr( __Vector )
1873 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1874 self.shape = self.__V.shape
1875 self.size = self.__V.size
# ...or as a Persistence series of vectors.
1876 elif __Series is not None:
1877 self.__is_series = True
1878 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1879 self.__V = Persistence.OneVector(self.__name)
1880 if isinstance(__Series, str):
1881 __Series = PlatformInfo.strmatrix2liststr(__Series)
1882 for member in __Series:
1883 if isinstance(member, str):
1884 member = PlatformInfo.strvect2liststr( member )
1885 self.__V.store(numpy.asarray( member, dtype=float ))
# Persistence objects may expose shape as attribute or as a method.
1888 if isinstance(self.__V.shape, (tuple, list)):
1889 self.shape = self.__V.shape
1891 self.shape = self.__V.shape()
1892 if len(self.shape) == 1:
1893 self.shape = (self.shape[0],1)
1894 self.size = self.shape[0] * self.shape[1]
# Continuation of a raise ValueError(...) whose opening line is not shown.
1897 "The %s object is improperly defined or undefined,"%self.__name+\
1898 " it requires at minima either a vector, a list/tuple of"+\
1899 " vectors or a persistent object. Please check your vector input.")
1901 if scheduledBy is not None:
1902 self.__T = scheduledBy
1904 def getO(self, withScheduler=False):
# Returns the stored value, optionally paired with its scheduler self.__T.
1906 return self.__V, self.__T
1907 elif self.__T is None:
1913 "Internal type check"
1914 return self.__is_vector
1917 "Internal type check"
1918 return self.__is_series
1921 "x.__repr__() <==> repr(x)"
1922 return repr(self.__V)
1925 "x.__str__() <==> str(x)"
1926 return str(self.__V)
1928 # ==============================================================================
# NOTE(review): numbered listing with gaps — "def"/"else:"/"try:" lines,
# "raise ValueError(" openers and docstring delimiters are among the missing
# original lines; do not treat this excerpt as complete.
1929 class Covariance(object):
# General covariance-type interface class. Internally self.__C holds one of
# four representations, discriminated by the __is_* flags:
#   scalar  -> variance multiplying an identity correlation matrix
#   vector  -> diagonal of variances
#   matrix  -> full numpy.matrix
#   object  -> user object exposing a matrix-like protocol
# NOTE(review): relies on numpy.matrix/asmatrix, which NumPy has deprecated;
# migrating to ndarray would change return types callers may depend on, so it
# is only flagged here, not changed.
1931 General covariance-type interface class
1934 name = "GenericCovariance",
1935 asCovariance = None,
1936 asEyeByScalar = None,
1937 asEyeByVector = None,
1940 toBeChecked = False,
# (Translated constructor docstring.)
1943 Allows one to define a covariance:
1944 - asCovariance : data input, as a matrix compatible with
1945 the numpy.matrix constructor
1946 - asEyeByScalar : data input as a single variance scalar,
1947 multiplying an identity correlation matrix, so that no matrix
1948 has to be given explicitly
1949 - asEyeByVector : data input as a single variance vector,
1950 to be put on the diagonal of a correlation matrix, so that no matrix
1951 has to be given explicitly
1952 - asCovObject : data input as a python object, which has the
1953 mandatory methods "getT", "getI", "diag", "trace", "__add__",
1954 "__sub__", "__neg__", "__mul__", "__rmul__" and optional ones "shape",
1955 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1956 - toBeChecked : boolean indicating whether the SPD character of the
1957 full matrix has to be checked
1959 self.__name = str(name)
1960 self.__check = bool(toBeChecked)
1963 self.__is_scalar = False
1964 self.__is_vector = False
1965 self.__is_matrix = False
1966 self.__is_object = False
# Input resolution: from script (per-kind branches, conditions not all
# visible here) or directly from the four keyword arguments.
1968 if asScript is not None:
1969 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1971 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1973 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1975 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1977 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1979 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar case: must reduce to exactly one value; stored as abs(float).
1981 if __Scalar is not None:
1982 if isinstance(__Scalar, str):
1983 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1984 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1985 if numpy.array(__Scalar).size != 1:
# Continuation of a raise ValueError(...) whose opening line is not shown.
1987 " The diagonal multiplier given to define a sparse matrix is"+\
1988 " not a unique scalar value.\n Its actual measured size is"+\
1989 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
1990 self.__is_scalar = True
1991 self.__C = numpy.abs( float(__Scalar) )
# Vector case: diagonal of variances; logical shape is the full square matrix.
1994 elif __Vector is not None:
1995 if isinstance(__Vector, str):
1996 __Vector = PlatformInfo.strvect2liststr( __Vector )
1997 self.__is_vector = True
1998 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1999 self.shape = (self.__C.size,self.__C.size)
2000 self.size = self.__C.size**2
2001 elif __Matrix is not None:
2002 self.__is_matrix = True
2003 self.__C = numpy.matrix( __Matrix, float )
2004 self.shape = self.__C.shape
2005 self.size = self.__C.size
# Object case: duck-typed; the mandatory protocol is checked attribute by
# attribute before the object is accepted.
2006 elif __Object is not None:
2007 self.__is_object = True
2009 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2010 if not hasattr(self.__C,at):
2011 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2012 if hasattr(self.__C,"shape"):
2013 self.shape = self.__C.shape
2016 if hasattr(self.__C,"size"):
2017 self.size = self.__C.size
2025 def __validate(self):
# Validation of the stored covariance: squareness, positivity, and (when
# checking is requested or logging is verbose) an SPD check via Cholesky.
2027 if self.__C is None:
2028 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2029 if self.ismatrix() and min(self.shape) != max(self.shape):
2030 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2031 if self.isobject() and min(self.shape) != max(self.shape):
2032 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2033 if self.isscalar() and self.__C <= 0:
2034 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2035 if self.isvector() and (self.__C <= 0).any():
2036 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# Cholesky succeeds iff the matrix is symmetric positive-definite.
2037 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2039 numpy.linalg.cholesky( self.__C )
2041 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2042 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2046 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2049 "Internal type check"
2050 return self.__is_scalar
2053 "Internal type check"
2054 return self.__is_vector
2057 "Internal type check"
2058 return self.__is_matrix
2061 "Internal type check"
2062 return self.__is_object
# getI (inverse): each representation inverts in its own cheapest form.
2067 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2068 elif self.isvector():
2069 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2070 elif self.isscalar():
2071 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2072 elif self.isobject() and hasattr(self.__C,"getI"):
2073 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2075 return None # Indispensable
# getT (transpose): diagonal/scalar forms are symmetric, returned as-is.
2080 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2081 elif self.isvector():
2082 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2083 elif self.isscalar():
2084 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2085 elif self.isobject() and hasattr(self.__C,"getT"):
2086 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2088 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
2091 "Cholesky decomposition"
2093 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2094 elif self.isvector():
2095 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2096 elif self.isscalar():
2097 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2098 elif self.isobject() and hasattr(self.__C,"cholesky"):
2099 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2101 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2103 def choleskyI(self):
2104 "Inverse of the Cholesky decomposition"
2106 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2107 elif self.isvector():
2108 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2109 elif self.isscalar():
2110 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2111 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2112 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2114 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2117 "Matrix square root"
# Full-matrix case uses scipy.linalg.sqrtm; real part kept to drop numerical
# imaginary noise.
2120 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2121 elif self.isvector():
2122 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2123 elif self.isscalar():
2124 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2125 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2126 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2128 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2131 "Inverse of the matrix square root"
2134 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2135 elif self.isvector():
2136 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2137 elif self.isscalar():
2138 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2139 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2140 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2142 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2144 def diag(self, msize=None):
2145 "Diagonal of the matrix"
2147 return numpy.diag(self.__C)
2148 elif self.isvector():
2150 elif self.isscalar():
# A scalar covariance has no intrinsic size: caller must provide msize.
2152 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2154 return self.__C * numpy.ones(int(msize))
2155 elif self.isobject() and hasattr(self.__C,"diag"):
2156 return self.__C.diag()
2158 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2160 def trace(self, msize=None):
2161 "Trace of the matrix"
2163 return numpy.trace(self.__C)
2164 elif self.isvector():
2165 return float(numpy.sum(self.__C))
2166 elif self.isscalar():
2168 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2170 return self.__C * int(msize)
2171 elif self.isobject():
2172 return self.__C.trace()
2174 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2176 def asfullmatrix(self, msize=None):
# Materializes the dense ndarray form of the covariance.
2179 return numpy.asarray(self.__C, dtype=float)
2180 elif self.isvector():
2181 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2182 elif self.isscalar():
2184 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2186 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2187 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2188 return self.__C.asfullmatrix()
2190 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2192 def assparsematrix(self):
2200 "x.__repr__() <==> repr(x)"
2201 return repr(self.__C)
2204 "x.__str__() <==> str(x)"
2205 return str(self.__C)
2207 def __add__(self, other):
2208 "x.__add__(y) <==> x+y"
2209 if self.ismatrix() or self.isobject():
2210 return self.__C + numpy.asmatrix(other)
2211 elif self.isvector() or self.isscalar():
# In-place add of the diagonal onto a copy-as-array of "other".
2212 _A = numpy.asarray(other)
2213 if len(_A.shape) == 1:
# NOTE(review): the [::2] stride on a 1-D operand adds self.__C only to
# every other element — looks suspicious; confirm intent against callers
# before changing (the 2-D branch below uses the usual diagonal stride).
2214 _A.reshape((-1,1))[::2] += self.__C
2216 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2217 return numpy.asmatrix(_A)
2219 def __radd__(self, other):
2220 "x.__radd__(y) <==> y+x"
2221 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2223 def __sub__(self, other):
2224 "x.__sub__(y) <==> x-y"
2225 if self.ismatrix() or self.isobject():
2226 return self.__C - numpy.asmatrix(other)
2227 elif self.isvector() or self.isscalar():
# Diagonal-stride trick: the flattened view indexed every shape[1]+1 steps
# walks the main diagonal of a square array.
2228 _A = numpy.asarray(other)
2229 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2230 return numpy.asmatrix(_A)
2232 def __rsub__(self, other):
2233 "x.__rsub__(y) <==> y-x"
2234 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2237 "x.__neg__() <==> -x"
2240 def __matmul__(self, other):
# Matrix product C @ other; vector operands come back raveled (1-D),
# matrix operands as 2-D ndarrays.
2241 "x.__mul__(y) <==> x@y"
2242 if self.ismatrix() and isinstance(other, (int, float)):
2243 return numpy.asarray(self.__C) * other
2244 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2245 if numpy.ravel(other).size == self.shape[1]: # Vector
2246 return numpy.ravel(self.__C @ numpy.ravel(other))
2247 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2248 return numpy.asarray(self.__C) @ numpy.asarray(other)
2250 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2251 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
# Diagonal covariance: product reduces to elementwise scaling.
2252 if numpy.ravel(other).size == self.shape[1]: # Vector
2253 return numpy.ravel(self.__C) * numpy.ravel(other)
2254 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2255 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2257 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2258 elif self.isscalar() and isinstance(other,numpy.matrix):
2259 return numpy.asarray(self.__C * other)
2260 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2261 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2262 return self.__C * numpy.ravel(other)
2264 return self.__C * numpy.asarray(other)
2265 elif self.isobject():
2266 return self.__C.__matmul__(other)
2268 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2270 def __mul__(self, other):
# Legacy numpy.matrix-style product C * other; returns matrix types, unlike
# __matmul__ above which returns ndarrays.
2271 "x.__mul__(y) <==> x*y"
2272 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2273 return self.__C * other
2274 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2275 if numpy.ravel(other).size == self.shape[1]: # Vector
2276 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2277 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2278 return self.__C * numpy.asmatrix(other)
2281 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2282 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2283 if numpy.ravel(other).size == self.shape[1]: # Vector
2284 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2285 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2286 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2289 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2290 elif self.isscalar() and isinstance(other,numpy.matrix):
2291 return self.__C * other
2292 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2293 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2294 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2296 return self.__C * numpy.asmatrix(other)
2297 elif self.isobject():
2298 return self.__C.__mul__(other)
2300 raise NotImplementedError(
2301 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2303 def __rmatmul__(self, other):
# Right-hand product other @ C.
2304 "x.__rmul__(y) <==> y@x"
2305 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2306 return other * self.__C
2307 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2308 if numpy.ravel(other).size == self.shape[1]: # Vector
2309 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2310 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2311 return numpy.asmatrix(other) * self.__C
2314 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2315 elif self.isvector() and isinstance(other,numpy.matrix):
2316 if numpy.ravel(other).size == self.shape[0]: # Vector
2317 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2318 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2319 return numpy.asmatrix(numpy.array(other) * self.__C)
2322 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2323 elif self.isscalar() and isinstance(other,numpy.matrix):
2324 return other * self.__C
2325 elif self.isobject():
2326 return self.__C.__rmatmul__(other)
2328 raise NotImplementedError(
2329 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2331 def __rmul__(self, other):
# Right-hand legacy product other * C (mirrors __rmatmul__, plus a
# scalar*float fast path).
2332 "x.__rmul__(y) <==> y*x"
2333 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2334 return other * self.__C
2335 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2336 if numpy.ravel(other).size == self.shape[1]: # Vector
2337 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2338 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2339 return numpy.asmatrix(other) * self.__C
2342 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2343 elif self.isvector() and isinstance(other,numpy.matrix):
2344 if numpy.ravel(other).size == self.shape[0]: # Vector
2345 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2346 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2347 return numpy.asmatrix(numpy.array(other) * self.__C)
2350 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2351 elif self.isscalar() and isinstance(other,numpy.matrix):
2352 return other * self.__C
2353 elif self.isscalar() and isinstance(other,float):
2354 return other * self.__C
2355 elif self.isobject():
2356 return self.__C.__rmul__(other)
2358 raise NotImplementedError(
2359 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2362 "x.__len__() <==> len(x)"
2363 return self.shape[0]
2365 # ==============================================================================
# NOTE(review): numbered listing with gaps — the body of func() (presumably an
# exec of the stored text — TODO confirm) and the "getfunc" header are missing.
2366 class Observer2Func(object):
# Builds an observer function from its source text (self.__corps).
2368 Creation of an observer function from its text
2370 def __init__(self, corps=""):
2371 self.__corps = corps
2372 def func(self,var,info):
2373 "Observation function"
2376 "Returns the function pointer from within the object"
2379 # ==============================================================================
# NOTE(review): numbered listing with gaps — the dict-literal openers for
# self.__viewers / self.__loaders and some method headers are missing lines.
2380 class CaseLogger(object):
# Records the sequence of commands used to build a case (self.__logSerie) and
# can replay it through pluggable viewers (dump) and loaders (load).
2382 Keeps the commands used to create a case
2384 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2385 self.__name = str(__name)
2386 self.__objname = str(__objname)
2387 self.__logSerie = []
2388 self.__switchoff = False
# Built-in output formats (entries of the self.__viewers dict).
2390 "TUI" :Interfaces._TUIViewer,
2391 "SCD" :Interfaces._SCDViewer,
2392 "YACS":Interfaces._YACSViewer,
2393 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2394 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2395 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formats (entries of the self.__loaders dict).
2398 "TUI" :Interfaces._TUIViewer,
2399 "COM" :Interfaces._COMViewer,
# Caller-supplied extensions are merged over the built-ins.
2401 if __addViewers is not None:
2402 self.__viewers.update(dict(__addViewers))
2403 if __addLoaders is not None:
2404 self.__loaders.update(dict(__addLoaders))
2406 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2407 "Recording of an individual command"
# Nothing is recorded while self.__switchoff is active.
2408 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2409 if "self" in __keys: __keys.remove("self")
2410 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2412 self.__switchoff = True
2414 self.__switchoff = False
2416 def dump(self, __filename=None, __format="TUI", __upa=""):
2417 "Normalized output of the commands"
2418 if __format in self.__viewers:
2419 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2421 raise ValueError("Dumping as \"%s\" is not available"%__format)
2422 return __formater.dump(__filename, __upa)
2424 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2425 "Normalized loading of the commands"
2426 if __format in self.__loaders:
2427 __formater = self.__loaders[__format]()
2429 raise ValueError("Loading as \"%s\" is not available"%__format)
2430 return __formater.load(__filename, __content, __object)
2432 # ==============================================================================
# NOTE(review): numbered listing with gaps — the "def" header of this function
# (named "MultiFonction" per its error messages), the _mpEnabled/_mpWorkers
# parameters, the jobs construction and the final return are missing lines.
2435 _extraArguments = None,
2436 _sFunction = lambda x: x,
# For an ordered list of input vectors, returns the corresponding list of
# values of the function given in argument, optionally in parallel via
# multiprocessing.
2441 For an ordered list of input vectors, returns the corresponding
2442 list of values of the function given in argument
2444 # Initial checks and definitions
2445 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2446 if not PlatformInfo.isIterable( __xserie ):
2447 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or None falls back to a default (set in a line
# not visible here); otherwise the user value is used.
2449 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2452 __mpWorkers = int(_mpWorkers)
2454 import multiprocessing
# Parallel branch: evaluate _sFunction over the jobs with a process pool.
2465 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2466 with multiprocessing.Pool(__mpWorkers) as pool:
2467 __multiHX = pool.map( _sFunction, _jobs )
2470 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential branch: dispatch on how extra arguments are passed.
2472 # logging.debug("MULTF Internal monoprocessing calculation begin")
2474 if _extraArguments is None:
2475 for __xvalue in __xserie:
2476 __multiHX.append( _sFunction( __xvalue ) )
2477 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2478 for __xvalue in __xserie:
2479 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2480 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2481 for __xvalue in __xserie:
2482 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2484 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2485 # logging.debug("MULTF Internal monoprocessing calculation end")
2487 # logging.debug("MULTF Internal multifonction calculations end")
2490 # ==============================================================================
# Module self-test entry point: only prints a banner when run as a script.
2491 if __name__ == "__main__":
2492 print('\n AUTODIAGNOSTIC\n')