1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of computations: pairs of an input point
    and the corresponding operator result, used to avoid re-evaluating an
    operator at a point already seen.
    """
    # NOTE(review): several original lines are absent from this extract
    # (the __init__ header, some branch bodies and return statements);
    # only the visible code is kept below, with comments added.
            toleranceInRedundancy = 1.e-18,  # relative tolerance under which two points are considered identical
            lengthOfRedundancy    = -1,      # maximum cache length; -1 means "auto-size on first store"
        """
        The tolerance characteristics can be modified at creation.
        """
        self.__tolerBP  = float(toleranceInRedundancy)   # tolerance used by the redundancy test
        self.__lengthOR = int(lengthOfRedundancy)        # current maximum number of cached entries
        self.__initlnOR = self.__lengthOR                # initial length, restored on (re)enabling

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks the existence of an already stored computation matching the value"
        # Scan the cache backwards (most recent entries first), looking at
        # no more than __lengthOR entries.
            for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
                if not hasattr(xValue, 'size'):
                # entry stored for another operator name: not a match
                elif (str(oName) != self.__listOPCV[i][3]):
                # different vector size: not a match
                elif (xValue.size != self.__listOPCV[i][0].size):
                # cheap first-component test to reject before the full norm
                elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
                # full distance test: hit when within tolerance scaled by the stored norm
                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                    # cache hit: retrieve the previously computed value
                    __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx corresponding to the value x"
        if self.__lengthOR < 0:
            # First store: size the cache from the state dimension (capped at 50)
            self.__lengthOR = 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
            self.__lengthOR += 2 * min(numpy.size(xValue), 50) + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        # Drop the oldest entries when over capacity
        while len(self.__listOPCV) > self.__lengthOR:
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue),             # 1 Previous value
            numpy.linalg.norm(xValue),      # 2 Norm
            str(oName),                     # 3 Operator name

        # (disable() body — method header outside this extract): keep the
        # initial length, then deactivate the cache
        self.__initlnOR = self.__lengthOR
        self.__enabled = False

        # (enable() body — method header outside this extract): restore the
        # initial length and reactivate the cache
        self.__lengthOR = self.__initlnOR
        self.__enabled = True
114 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator type: the operator is
    defined either by a matrix or by a (multi-)function, and its
    evaluations are counted and optionally cached.
    """
    # NOTE(review): lines are missing from this extract (the __init__
    # header, the class-level call counters and the shared CacheManager
    # "CM", plus several else/branch lines); only the visible code is kept.
                 name                 = "GenericOperator",
                 avoidingRedundancy   = True,
                 reducingMemoryUse    = False,
                 inputAsMultiFunction = False,
                 enableMultiProcess   = False,
                 extraArguments       = None,
        """
        An object of this type is built by supplying, through one of the two
        keywords, either a Python function or multi-function, or a matrix.
        Arguments:
        - name : operator name
        - fromMethod : argument of Python function type
        - fromMatrix : argument suitable for the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - reducingMemoryUse : boolean forcing (or not) computations that use
          less memory
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0  # per-instance call counters
        self.__reduceM   = bool( reducingMemoryUse )
        self.__avoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # Function already behaves as a multi-function: use it directly
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap a mono-function so it can be applied to a series of inputs
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type   = "Method"
        elif fromMatrix is not None:
            # Linear operator given as a matrix (possibly in string form)
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type   = "Matrix"

    def disableAvoidingRedundancy(self):
        "Deactivates the computation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Activates the computation cache"
        # (surrounding branch outside this extract) presumably the fallback
        # when redundancy avoidance is off for this operator — TODO confirm
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue. This method simply applies the operator, each argument being
        expected to already be of the proper type.
        Arguments as a series:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are a mono or multi-value
        """
            # (surrounding normalization lines outside this extract)
            if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values supplied: store them instead of evaluating
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                # Feed the cache with the supplied pair (x, Hx)
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                    # Cache lookup for this point (redundancy avoidance on)
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                    # (else branch) no cache lookup
                    __alreadyCalculated = False
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    if self.__Matrix is not None:
                        # Linear case: direct matrix-vector product
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                        # (else branch) point deferred to the batched call below
                        self.__addOneMethodCall()
                _HxValue.append( _hv )
            # Batched evaluation of all points resolved neither by the
            # matrix form nor by the cache
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # MF computation
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
                if not hasattr(_hserie, "pop"):
                    # The multi-function must return a sequence of results
                        "The user input multi-function doesn't seem to return a"+\
                        " result sequence, behaving like a mono-function. It has"+\
                        # store the freshly computed pair in the cache
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        #
        if returnSerieAsArrayMatrix:
            # Stack the series of results as columns of a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to pairs
        (xValue, uValue). This method simply applies the operator, its
        argument being expected to be of the proper type. If uValue is
        None, the operator is assumed to apply to xValue only.
        Arguments:
        - paires : the arguments as pairs are:
            - xValue : X argument suitable for applying the operator
            - uValue : U argument suitable for applying the operator
        - argsAsSerie : indicates whether the argument is a mono or multi-value
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            # Linear case: apply the matrix to each X (U not used here)
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Function case: build the argument list, keeping the full
            # (X,U) pair only when U is defined
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    # (else branch) U undefined: pass X alone
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        #
        if returnSerieAsArrayMatrix:
            # Stack the series of results as columns of a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of arguments
        xValue, knowing that the operator is valid at xNominal. This method
        simply applies the operator, its argument being expected to be of
        the proper type. If the operator is linear, being a matrix, then it
        is valid at any nominal point and xNominal can be anything. There
        is a single pair by default, and argsAsSerie indicates that the
        argument is multi-pair.
        Arguments:
        - paires : the arguments as pairs are:
            - xNominal : series of arguments giving the point where the
              operator is built in order to be applied afterwards
            - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicates whether the argument is a mono or multi-value
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            # Linear case: xNominal is irrelevant, apply the matrix to each X
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # (else branch) function case: one batched multi-function call
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        #
        if returnSerieAsArrayMatrix:
            # Stack the series of results as columns of a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Returns the operator in matrix form; for a method-form operator an
        operating point must be supplied.
        """
        if self.__Matrix is not None:
            # Already a matrix: return it directly
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            # An operating point (or series of points) was supplied
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( self.__Method(((_vfmf, None),)) )
                # (else branch) single operating point
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
            # (else branch) no usable operating point
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

        # (shape() body — method header outside this extract)
        """
        Returns the shape in numpy form if the operator is available as
        a matrix.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
            # (else branch) no matrix form available
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Returns the numbers of operator evaluations, for this instance then
        for the whole class: (local total, local matrix, local method,
        local cached, global total, global matrix, global method,
        global cached).
        """
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Counts one matrix-form evaluation"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one (or nb) method-form evaluation(s)"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Counts one cache retrieval"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
413 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator type
    (Direct, Linear Tangent, Adjoint)
    """
    # NOTE(review): lines are missing from this extract (the __init__
    # header, some keyword arguments such as asMatrix/appliedInX/
    # scheduledBy/toBeChecked, and several else/raise lines); only the
    # visible code is kept.
                 name             = "GenericFullOperator",
                 asOneFunction    = None,   # 1 Function
                 asThreeFunctions = None,   # 3 Functions in a dictionary
                 asScript         = None,   # 1 or 3 Function(s) by script
                 asDict           = None,   # Parameters
                 extraArguments   = None,
                 performancePrf   = None,
                 inputAsMF        = False,  # Function(s) as Multi-Functions
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        # Merge user parameters into the working dictionary
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Derivatives and evaluation multiprocessing are exclusive
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        #
        if asScript is not None:
            # Operator(s) imported from a user script file
            __Matrix, __Function = None, None
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                # Single function: derivatives will be approximated
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                # Three functions: direct, tangent and adjoint from the script
                "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # (else branch of asScript) operators given directly as objects
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        # (else branch) dictionary supplied without a usable "Direct"
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    # (else branch) bare callable supplied
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    # Only the direct function usable: approximate derivatives
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    "The functions has to be given in a dictionnary which have either"+\
                    " 1 key (\"Direct\") or"+\
                    " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent as the direct operator
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        # Normalize the appliedInX argument into a dictionary (or None)
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        # Case 1: direct function only, derivatives by finite differences
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Fill in every finite-difference option with its default
            if "CenteredFiniteDifference"        not in __Function: __Function["CenteredFiniteDifference"]        = False
            if "DifferentialIncrement"           not in __Function: __Function["DifferentialIncrement"]           = 0.01
            if "withdX"                          not in __Function: __Function["withdX"]                          = None
            if "withReducingMemoryUse"           not in __Function: __Function["withReducingMemoryUse"]           = __reduceM
            if "withAvoidingRedundancy"          not in __Function: __Function["withAvoidingRedundancy"]          = __avoidRC
            if "withToleranceInRedundancy"       not in __Function: __Function["withToleranceInRedundancy"]       = 1.e-18
            if "withLengthOfRedundancy"          not in __Function: __Function["withLengthOfRedundancy"]          = -1
            if "NumberOfProcesses"               not in __Function: __Function["NumberOfProcesses"]               = None
            if "withmfEnabled"                   not in __Function: __Function["withmfEnabled"]                   = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["CenteredFiniteDifference"],
                increment             = __Function["DifferentialIncrement"],
                dX                    = __Function["withdX"],
                extraArguments        = self.__extraArgs,
                reducingMemoryUse     = __Function["withReducingMemoryUse"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy    = __Function["withLengthOfRedundancy"],
                mpEnabled             = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers             = __Function["NumberOfProcesses"],
                mfEnabled             = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator(
                fromMethod           = FDA.DirectOperator,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs,
                enableMultiProcess   = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name                 = self.__name+"Tangent",
                fromMethod           = FDA.TangentOperator,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name                 = self.__name+"Adjoint",
                fromMethod           = FDA.AdjointOperator,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs )
        # Case 2: the three functions are given explicitly
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator(
                fromMethod           = __Function["Direct"],
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs,
                enableMultiProcess   = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name                 = self.__name+"Tangent",
                fromMethod           = __Function["Tangent"],
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name                 = self.__name+"Adjoint",
                fromMethod           = __Function["Adjoint"],
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments       = self.__extraArgs )
        # Case 3: linear operator given as a matrix (adjoint = transpose)
        elif asMatrix is not None:
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                fromMatrix           = __matrice,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess   = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name                 = self.__name+"Tangent",
                fromMatrix           = __matrice,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name                 = self.__name+"Adjoint",
                fromMatrix           = __matrice.T,
                reducingMemoryUse    = __reduceM,
                avoidingRedundancy   = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            # (else branch) nothing usable was supplied
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a matrix, a Direct operator for"+\
                " approximate derivatives or a Tangent/Adjoint operators pair."+\
                " Please check your operator input.")
        # Store the (optional) operating point(s), as column vectors
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
            self.__FO["AppliedInX"] = None

        # (__repr__ body — method header outside this extract)
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        # (__str__ body — method header outside this extract)
        "x.__str__() <==> str(x)"
        return str(self.__FO)
649 # ==============================================================================
class Algorithm(object):
    """
    General interface class for an algorithm type.

    It provides a framework for writing an elementary data assimilation
    algorithm class, by supplying a container (dictionary) of initialized
    persistent variables, and access methods to these stored variables.

    An elementary algorithm class has to implement the "run" method.
    """
    def __init__(self, name):
        """
        This initialization builds storage variables that are generically
        available in the elementary algorithms. These storage variables are
        then kept in a dictionary internal to the object, accessed through
        the "get" method.

        The planned variables are:
        - APosterioriCorrelations : correlation matrix of the A matrix
        - APosterioriCovariance : a posteriori covariance matrix : A
        - APosterioriStandardDeviations : vector of standard deviations of the A matrix
        - APosterioriVariances : vector of variances of the A matrix
        - Analysis : analysis vector : Xa
        - BMA : Background minus Analysis : Xa - Xb
        - CostFunctionJ : global cost function, sum of the two following parts Jb and Jo
        - CostFunctionJAtCurrentOptimum : global cost function at the current optimal state during iterations
        - CostFunctionJb : background part of the cost function : Jb
        - CostFunctionJbAtCurrentOptimum : background part at the current optimal state during iterations
        - CostFunctionJo : observation part of the cost function : Jo
        - CostFunctionJoAtCurrentOptimum : observation part at the current optimal state during iterations
        - CurrentIterationNumber : current iteration number in iterative algorithms, starting at 0
        - CurrentOptimum : current optimal state during iterations
        - CurrentState : current state during iterations
        - CurrentStepNumber : current step number in evolution algorithms, starting at 0
        - EnsembleOfSimulations : ensemble of states (outputs, simulations) stored column-wise in a matrix
        - EnsembleOfSnapshots : ensemble of states stored column-wise in a matrix
        - EnsembleOfStates : ensemble of states (inputs, parameters) stored column-wise in a matrix
        - ForecastCovariance : covariance of the current forecast state during iterations
        - ForecastState : current forecast state during iterations
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part of the cost function
        - GradientOfCostFunctionJo : gradient of the observation part of the cost function
        - IndexOfOptimum : index of the current optimal state during iterations
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentState : the innovation at the current state : dn = Y - H(Xn)
        - JacobianMatrixAtBackground : Jacobian matrix at the background state
        - JacobianMatrixAtCurrentState : Jacobian matrix at the current state
        - JacobianMatrixAtOptimum : Jacobian matrix at the optimum
        - KalmanGainAtOptimum : Kalman gain at the optimum
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - Residu : for the checking algorithms
        - SampledStateForQuantiles : state samples for quantile estimation
        - SigmaBck2 : indicator of optimal correction of background errors
        - SigmaObs2 : indicator of optimal correction of observation errors
        - SimulatedObservationAtBackground : observed state H(Xb) at background
        - SimulatedObservationAtCurrentOptimum : observed state H(X) at the current optimal state
        - SimulatedObservationAtCurrentState : observed state H(X) at the current state
        - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
        - SimulationQuantiles : observed states H(X) for the requested quantiles
        Additional variables to store can be added in the initialization of
        the elementary algorithm that inherits from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__internal_state = {}
        self.__required_parameters = {}
        self.__required_inputs = {
            "RequiredInputValues":{"mandatory":(), "optional":()},
            "ClassificationTags":[],
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
        self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
        self.__canonical_stored_name = {}    # Mapping "lower"->"correct"
        self.__replace_by_the_new_name = {}  # New name derived from an old name
        # One persistent storage container per documented variable
        self.StoredVariables = {}
        self.StoredVariables["APosterioriCorrelations"]              = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["APosterioriCovariance"]                = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriStandardDeviations"]        = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriVariances"]                 = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["Analysis"]                             = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["BMA"]                                  = Persistence.OneVector(name = "BMA")
        self.StoredVariables["CostFunctionJ"]                        = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"]        = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJb"]                       = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"]       = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJo"]                       = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"]       = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["CurrentEnsembleState"]                 = Persistence.OneMatrix(name = "CurrentEnsembleState")
        self.StoredVariables["CurrentIterationNumber"]               = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentOptimum"]                       = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["CurrentState"]                         = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["CurrentStepNumber"]                    = Persistence.OneIndex(name = "CurrentStepNumber")
        self.StoredVariables["EnsembleOfSimulations"]                = Persistence.OneMatrix(name = "EnsembleOfSimulations")
        self.StoredVariables["EnsembleOfSnapshots"]                  = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
        self.StoredVariables["EnsembleOfStates"]                     = Persistence.OneMatrix(name = "EnsembleOfStates")
        self.StoredVariables["ForecastCovariance"]                   = Persistence.OneMatrix(name = "ForecastCovariance")
        self.StoredVariables["ForecastState"]                        = Persistence.OneVector(name = "ForecastState")
        self.StoredVariables["GradientOfCostFunctionJ"]              = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"]             = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"]             = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["IndexOfOptimum"]                       = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["Innovation"]                           = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentAnalysis"]          = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
        self.StoredVariables["InnovationAtCurrentState"]             = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["JacobianMatrixAtBackground"]           = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
        self.StoredVariables["JacobianMatrixAtCurrentState"]         = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
        self.StoredVariables["JacobianMatrixAtOptimum"]              = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
        self.StoredVariables["KalmanGainAtOptimum"]                  = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
        self.StoredVariables["MahalanobisConsistency"]               = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"]                                  = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"]                                  = Persistence.OneVector(name = "OMB")
        self.StoredVariables["OptimalPoints"]                        = Persistence.OneVector(name = "OptimalPoints")
        self.StoredVariables["ReducedBasis"]                         = Persistence.OneMatrix(name = "ReducedBasis")
        self.StoredVariables["Residu"]                               = Persistence.OneScalar(name = "Residu")
        self.StoredVariables["Residus"]                              = Persistence.OneVector(name = "Residus")
        self.StoredVariables["SampledStateForQuantiles"]             = Persistence.OneMatrix(name = "SampledStateForQuantiles")
        self.StoredVariables["SigmaBck2"]                            = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["SigmaObs2"]                            = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SimulatedObservationAtBackground"]     = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentState"]   = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"]        = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulationQuantiles"]                  = Persistence.OneMatrix(name = "SimulationQuantiles")
        # Build the case-insensitive lookup tables for stored/parameter names
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
        for k, v in self.__variable_names_not_public.items():
            self.__canonical_parameter_name[k.lower()] = k
        self.__canonical_parameter_name["algorithm"] = "Algorithm"
        self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# Common pre-processing executed before every algorithm run: merges the user
# Parameters with declared defaults, checks presence of required vectors
# (Xb, Y, U), covariances (B, R, Q) and operators (HO, EM, CM) against
# self.__required_inputs, normalizes "Bounds" and "InitializationPoint",
# works around a TNC minimizer issue, and tunes verbosity flags.
789 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
791 logging.debug("%s Lancement", self._name)
792 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
# Reset the cpu/elapsed time reference for this run.
793 self._getTimeState(reset=True)
795 # Update the internal parameters with the content of Parameters, re-using
796 # the default values for all those not defined
797 self.__setParameters(Parameters, reset=True)
# Non-public variables get their defaults only if the user did not set them.
798 for k, v in self.__variable_names_not_public.items():
799 if k not in self._parameters: self.__setParameters( {k:v} )
801 # Corrections and completions of the vectors
# Local checker: raises if a mandatory vector is missing, otherwise only logs.
802 def __test_vvalue(argument, variable, argname, symbol=None):
803 if symbol is None: symbol = variable
805 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
806 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
807 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
808 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
810 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
812 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
813 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
814 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
815 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
818 "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
819 self._name,argname,symbol,numpy.array(argument).size))
821 __test_vvalue( Xb, "Xb", "Background or initial state" )
822 __test_vvalue( Y, "Y", "Observation" )
823 __test_vvalue( U, "U", "Control" )
825 # Corrections and completions of the covariances
# Same checking scheme as above, for the error covariance matrices.
826 def __test_cvalue(argument, variable, argname, symbol=None):
827 if symbol is None: symbol = variable
829 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
830 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
831 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
832 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
834 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
836 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
837 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
838 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
839 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
841 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
843 __test_cvalue( B, "B", "Background" )
844 __test_cvalue( R, "R", "Observation" )
845 __test_cvalue( Q, "Q", "Evolution" )
847 # Corrections and completions of the operators
# Operators are given as dicts; empty dict counts as "not set".
848 def __test_ovalue(argument, variable, argname, symbol=None):
849 if symbol is None: symbol = variable
850 if argument is None or (isinstance(argument,dict) and len(argument)==0):
851 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
852 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
853 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
854 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
856 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
858 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
859 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
860 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
861 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
863 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
865 __test_ovalue( HO, "HO", "Observation", "H" )
866 __test_ovalue( EM, "EM", "Evolution", "M" )
867 __test_ovalue( CM, "CM", "Control Model", "C" )
869 # Corrections and completions of the bounds
# "Bounds" is kept only when it is a non-empty list/tuple, otherwise forced to None.
870 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
871 logging.debug("%s Bounds taken into account"%(self._name,))
873 self._parameters["Bounds"] = None
874 if ("StateBoundsForQuantiles" in self._parameters) \
875 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
876 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
877 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
878 # Warning: unlike Bounds, no defaulting to None, otherwise one cannot be without bounds
880 # Corrections and completions of the initialization in X
# A forced initial point must match the background size; otherwise fall back to Xb.
881 if "InitializationPoint" in self._parameters:
883 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
884 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
885 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
886 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
887 # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
889 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
891 if self._parameters["InitializationPoint"] is None:
892 raise ValueError("Forced initial point can not be set without any given Background or required value")
894 # Workaround for a TNC bug on the return of the Minimum
895 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
896 self.setParameterValue("StoreInternalVariables",True)
898 # Verbosity and logging
# Below WARNING level, enable the optimizers' own verbose output.
899 if logging.getLogger().level < logging.WARNING:
900 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
901 self._parameters["optmessages"] = 15
903 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
904 self._parameters["optmessages"] = 0
# Common post-processing executed after every algorithm run: derives
# a-posteriori variances / standard deviations / correlations from each
# stored APosterioriCovariance matrix when requested, then logs operator
# call counts and resource usage.
908 def _post_run(self,_oH=None):
910 if ("StoreSupplementaryCalculations" in self._parameters) and \
911 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
912 for _A in self.StoredVariables["APosterioriCovariance"]:
913 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
914 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
915 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
916 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
917 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Normalize the covariance by the inverse standard deviations to get correlations.
918 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
919 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
920 self.StoredVariables["APosterioriCorrelations"].store( _C )
# _oH, when given, holds the observation operator triplet; log its call counters.
921 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
923 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
924 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
926 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
927 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
928 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
929 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
930 logging.debug("%s Terminé", self._name)
# Dict-like access helpers over self.StoredVariables. Key lookups are
# case-insensitive via the self.__canonical_stored_name mapping.
933 def _toStore(self, key):
934 "True if in StoreSupplementaryCalculations, else False"
935 return key in self._parameters["StoreSupplementaryCalculations"]
# get: returns one stored variable (case-insensitive key), or the whole
# StoredVariables mapping when no key is given; values are the persistence
# objects themselves, not copies.
937 def get(self, key=None):
939 Renvoie l'une des variables stockées identifiée par la clé, ou le
940 dictionnaire de l'ensemble des variables disponibles en l'absence de
941 clé. Ce sont directement les variables sous forme objet qui sont
942 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
943 des classes de persistance.
946 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
948 return self.StoredVariables
950 def __contains__(self, key=None):
951 "D.__contains__(k) -> True if D has a key k, else False"
952 if key is None or key.lower() not in self.__canonical_stored_name:
955 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
958 "D.keys() -> list of D's keys"
959 if hasattr(self, "StoredVariables"):
960 return self.StoredVariables.keys()
965 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
966 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
967 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
972 raise TypeError("pop expected at least 1 arguments, got 0")
973 "If key is not found, d is returned if given, otherwise KeyError is raised"
# Abstract entry point: concrete algorithms must override this with the
# elementary assimilation computation; the base class only raises.
979 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
981 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
982 sa forme mathématique la plus naturelle possible.
984 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Declares a required algorithm parameter with its default value, typecast,
# allowed values and message; registers the (lowercased) canonical name and,
# when an old name is given, the deprecation mapping used by __setParameters.
986 def defineRequiredParameter(self,
998 Permet de définir dans l'algorithme des paramètres requis et leurs
999 caractéristiques par défaut.
1002 raise ValueError("A name is mandatory to define a required parameter.")
1004 self.__required_parameters[name] = {
1005 "default" : default,
1006 "typecast" : typecast,
1009 "listval" : listval,
1010 "listadv" : listadv,
1011 "message" : message,
1012 "oldname" : oldname,
1014 self.__canonical_parameter_name[name.lower()] = name
1015 if oldname is not None:
1016 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1017 self.__replace_by_the_new_name[oldname.lower()] = name
# Logs the default value as computed by setParameterValue (no explicit value).
1018 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# Returns either the sorted list of required parameter names, or the full
# description dictionary, depending on noDetails.
1020 def getRequiredParameters(self, noDetails=True):
1022 Renvoie la liste des noms de paramètres requis ou directement le
1023 dictionnaire des paramètres requis.
1026 return sorted(self.__required_parameters.keys())
1028 return self.__required_parameters
# Resolves and validates the value of a declared parameter: applies the
# typecast to the given value (or to the default when value is None), then
# enforces minval/maxval bounds and listval/listadv membership.
# Raises ValueError on any violation.
1030 def setParameterValue(self, name=None, value=None):
1032 Renvoie la valeur d'un paramètre requis de manière contrôlée
# Canonical (case-insensitive) lookup of the parameter descriptor.
1034 __k = self.__canonical_parameter_name[name.lower()]
1035 default = self.__required_parameters[__k]["default"]
1036 typecast = self.__required_parameters[__k]["typecast"]
1037 minval = self.__required_parameters[__k]["minval"]
1038 maxval = self.__required_parameters[__k]["maxval"]
1039 listval = self.__required_parameters[__k]["listval"]
1040 listadv = self.__required_parameters[__k]["listadv"]
1042 if value is None and default is None:
1044 elif value is None and default is not None:
1045 if typecast is None: __val = default
1046 else: __val = typecast( default )
1048 if typecast is None: __val = value
1051 __val = typecast( value )
1053 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bound checks go through numpy so they work element-wise on sequences too.
1055 if minval is not None and (numpy.array(__val, float) < minval).any():
1056 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1057 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1058 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Membership checks: each element (for list/tuple values) or the value itself
# must belong to listval (public choices) or listadv (advanced choices).
1059 if listval is not None or listadv is not None:
1060 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1062 if listval is not None and v in listval: continue
1063 elif listadv is not None and v in listadv: continue
1065 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1066 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1067 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# Declaration and retrieval of required input arguments, plus classification
# tags, all stored in self.__required_inputs.
1071 def requireInputArguments(self, mandatory=(), optional=()):
1073 Permet d'imposer des arguments de calcul requis en entrée.
1075 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1076 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Returns the (mandatory, optional) tuples as declared above.
1078 def getInputArguments(self):
1080 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1082 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# Appends classification tags and always returns the current full list.
1084 def setAttributes(self, tags=()):
1086 Permet d'adjoindre des attributs comme les tags de classification.
1087 Renvoie la liste actuelle dans tous les cas.
1089 self.__required_inputs["ClassificationTags"].extend( tags )
1090 return self.__required_inputs["ClassificationTags"]
# Stores the user-supplied parameters into self._parameters: maps given keys
# to their canonical names, warns (FutureWarning) about deprecated old names,
# then validates every declared parameter through setParameterValue, and logs
# the resulting values (abbreviated when longer than 100 elements).
1092 def __setParameters(self, fromDico={}, reset=False):
1094 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1096 self._parameters.update( fromDico )
# Build canonical-name -> user-key mapping for the keys actually provided.
1097 __inverse_fromDico_keys = {}
1098 for k in fromDico.keys():
1099 if k.lower() in self.__canonical_parameter_name:
1100 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1101 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1102 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Emit a deprecation warning for any user key registered as an old name.
1104 for k in __inverse_fromDico_keys.values():
1105 if k.lower() in self.__replace_by_the_new_name:
1106 __newk = self.__replace_by_the_new_name[k.lower()]
1107 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1108 __msg += " Please update your code."
1109 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Validate user-provided values, or fall back to defaults, for each declared parameter.
1111 for k in self.__required_parameters.keys():
1112 if k in __canonic_fromDico_keys:
1113 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1115 self._parameters[k] = self.setParameterValue(k)
1118 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1119 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1121 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# Internal named-state storage (a plain dict) and cpu/elapsed timing helpers.
1123 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1125 Permet de stocker des variables nommées constituant l'état interne
1127 if reset: # Empty the dictionary beforehand
1128 self.__internal_state = {}
1129 if key is not None and value is not None:
1130 self.__internal_state[key] = value
1131 self.__internal_state.update( dict(fromDico) )
# Returns one named state value (when the key exists), or the whole dict.
1133 def _getInternalState(self, key=None):
1135 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1137 if key is not None and key in self.__internal_state:
1138 return self.__internal_state[key]
1140 return self.__internal_state
# With reset, records the time origins; otherwise returns the (cpu, elapsed)
# seconds consumed since the last reset.
1142 def _getTimeState(self, reset=False):
1144 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1147 self.__initial_cpu_time = time.process_time()
1148 self.__initial_elapsed_time = time.perf_counter()
1151 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1152 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1153 return self.__cpu_time, self.__elapsed_time
# Stopping criterion on cpu/elapsed time limits taken from the parameters
# "MaximumCpuTime" and "MaximumElapsedTime"; builds a (condition, reason) pair.
1155 def _StopOnTimeLimit(self, X=None, withReason=False):
1156 "Stop criteria on time limit: True/False [+ Reason]"
1157 c, e = self._getTimeState()
1158 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1159 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1160 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1161 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1163 __SC, __SR = False, ""
1169 # ==============================================================================
# Lightweight stand-in for "Algorithm": provides the same storage interface
# (StoredVariables, _toStore, get) without the advanced checking machinery.
1170 class PartialAlgorithm(object):
1172 Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
1173 action avancée comme la vérification . Pour les méthodes reprises ici,
1174 le fonctionnement est identique à celles de la classe "Algorithm".
# Initializes the minimal set of stored variables and the case-insensitive
# name mapping used by get().
1176 def __init__(self, name):
1177 self._name = str( name )
1178 self._parameters = {"StoreSupplementaryCalculations":[]}
1180 self.StoredVariables = {}
1181 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1182 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1183 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1184 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1185 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1186 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
1188 self.__canonical_stored_name = {}
1189 for k in self.StoredVariables:
1190 self.__canonical_stored_name[k.lower()] = k
1192 def _toStore(self, key):
1193 "True if in StoreSupplementaryCalculations, else False"
1194 return key in self._parameters["StoreSupplementaryCalculations"]
# Same contract as Algorithm.get: one variable by case-insensitive key, or
# the whole mapping of persistence objects.
1196 def get(self, key=None):
1198 Renvoie l'une des variables stockées identifiée par la clé, ou le
1199 dictionnaire de l'ensemble des variables disponibles en l'absence de
1200 clé. Ce sont directement les variables sous forme objet qui sont
1201 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
1202 des classes de persistance.
1205 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1207 return self.StoredVariables
1209 # ==============================================================================
# Facade binding an algorithm choice (name or object) to its parameters;
# the constructor loads parameters, resolves the algorithm (possibly from a
# script via Interfaces.ImportFromScript) and instantiates it.
1210 class AlgorithmAndParameters(object):
1212 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1215 name = "GenericAlgorithm",
1222 self.__name = str(name)
1226 self.__algorithm = {}
1227 self.__algorithmFile = None
1228 self.__algorithmName = None
1230 self.updateParameters( asDict, asScript )
# The algorithm can come from an explicit object or be read from a script.
1232 if asAlgorithm is None and asScript is not None:
1233 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1235 __Algo = asAlgorithm
1237 if __Algo is not None:
1238 self.__A = str(__Algo)
1239 self.__P.update( {"Algorithm":self.__A} )
1241 self.__setAlgorithm( self.__A )
1243 self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
# Merges new parameters into self.__P, reading them from a script when given
# as such instead of a dict.
1245 def updateParameters(self,
1249 "Update of the parameters"
1250 if asDict is None and asScript is not None:
1251 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1255 if __Dict is not None:
1256 self.__P.update( dict(__Dict) )
# Runs the assimilation calculation in pure Python: unwraps every entry of
# the asDictAO dictionary (calling .getO() where available), validates shape
# consistency, then invokes the algorithm's run() with self.__P as Parameters.
1258 def executePythonScheme(self, asDictAO = None):
1259 "Runs the assimilation calculation"
# The operator cache is global; clear it so results from a previous case
# cannot leak into this run.
1260 Operator.CM.clearCache()
1262 if not isinstance(asDictAO, dict):
1263 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each input may be a wrapper exposing getO(); otherwise use the raw value.
1264 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1265 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1266 else: self.__Xb = None
1267 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1268 else: self.__Y = asDictAO["Observation"]
1269 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1270 else: self.__U = asDictAO["ControlInput"]
1271 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1272 else: self.__HO = asDictAO["ObservationOperator"]
1273 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1274 else: self.__EM = asDictAO["EvolutionModel"]
1275 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1276 else: self.__CM = asDictAO["ControlModel"]
1277 self.__B = asDictAO["BackgroundError"]
1278 self.__R = asDictAO["ObservationError"]
1279 self.__Q = asDictAO["EvolutionError"]
1281 self.__shape_validate()
1283 self.__algorithm.run(
1293 Parameters = self.__P,
# Runs the assimilation calculation through a YACS XML schema inside SALOME:
# checks the environment, loads and validates the schema, then executes it.
1297 def executeYACSScheme(self, FileName=None):
1298 "Runs the assimilation calculation"
1299 if FileName is None or not os.path.exists(FileName):
1300 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1302 __file = os.path.abspath(FileName)
1303 logging.debug("The YACS file name is \"%s\"."%__file)
# SALOME, YACS and ADAO must all be available in the environment.
1304 if not PlatformInfo.has_salome or \
1305 not PlatformInfo.has_yacs or \
1306 not PlatformInfo.has_adao:
1307 raise ImportError("\n\n"+\
1308 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1309 "Please load the right environnement before trying to use it.\n")
1312 import SALOMERuntime
1314 SALOMERuntime.RuntimeSALOME_setRuntime()
1316 r = pilot.getRuntime()
1317 xmlLoader = loader.YACSLoader()
1318 xmlLoader.registerProcCataLoader()
# Register the schema file itself as a "proc" catalog before loading it.
1320 catalogAd = r.loadCatalog("proc", __file)
1321 r.addCatalog(catalogAd)
1326 p = xmlLoader.load(__file)
1327 except IOError as ex:
1328 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Report parsing, validity and consistency problems before execution.
1330 logger = p.getLogger("parser")
1331 if not logger.isEmpty():
1332 print("The imported YACS XML schema has errors on parsing:")
1333 print(logger.getStr())
1336 print("The YACS XML schema is not valid and will not be executed:")
1337 print(p.getErrorReport())
1339 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1340 p.checkConsistency(info)
1341 if info.areWarningsOrErrors():
1342 print("The YACS XML schema is not coherent and will not be executed:")
1343 print(info.getGlobalRepr())
1345 e = pilot.ExecutorSwig()
1347 if p.getEffectiveState() != pilot.DONE:
1348 print(p.getErrorReport())
# Accessors delegating to the underlying algorithm object or to the
# parameter dictionary self.__P.
1352 def get(self, key = None):
1353 "Checks the existence of a variable or parameter key"
# Stored variables take precedence over parameters for the same key.
1354 if key in self.__algorithm:
1355 return self.__algorithm.get( key )
1356 elif key in self.__P:
1357 return self.__P[key]
1359 allvariables = self.__P
1360 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1363 def pop(self, k, d):
1364 "Needed for pickling"
1365 return self.__algorithm.pop(k, d)
1367 def getAlgorithmRequiredParameters(self, noDetails=True):
1368 "Returns the list of required parameters according to the algorithm"
1369 return self.__algorithm.getRequiredParameters(noDetails)
1371 def getAlgorithmInputArguments(self):
1372 "Returns the list of required inputs according to the algorithm"
1373 return self.__algorithm.getInputArguments()
1375 def getAlgorithmAttributes(self):
1376 "Returns the list of attributes according to the algorithm"
# Calling setAttributes() with no tags only returns the current list.
1377 return self.__algorithm.setAttributes()
# Observer management on the algorithm's stored variables: an algorithm must
# already be chosen and the target variable must exist.
1379 def setObserver(self, __V, __O, __I, __S):
1380 if self.__algorithm is None \
1381 or isinstance(self.__algorithm, dict) \
1382 or not hasattr(self.__algorithm,"StoredVariables"):
1383 raise ValueError("No observer can be build before choosing an algorithm.")
1384 if __V not in self.__algorithm:
1385 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1387 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1390 HookParameters = __I,
# Removes an observer previously set on variable __V; same preconditions.
1393 def removeObserver(self, __V, __O, __A = False):
1394 if self.__algorithm is None \
1395 or isinstance(self.__algorithm, dict) \
1396 or not hasattr(self.__algorithm,"StoredVariables"):
1397 raise ValueError("No observer can be removed before choosing an algorithm.")
1398 if __V not in self.__algorithm:
1399 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1401 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# Tests whether variable __V currently has a data observer attached.
1406 def hasObserver(self, __V):
1407 if self.__algorithm is None \
1408 or isinstance(self.__algorithm, dict) \
1409 or not hasattr(self.__algorithm,"StoredVariables"):
1411 if __V not in self.__algorithm:
1413 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys: union of algorithm variable names and parameter names, with the
# non-public variable names filtered out.
1416 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1417 for k in self.__variable_names_not_public:
1418 if k in __allvariables: __allvariables.remove(k)
1419 return __allvariables
# Membership and textual representations combine the algorithm name (__A)
# and the parameter dictionary (__P).
1421 def __contains__(self, key=None):
1422 "D.__contains__(k) -> True if D has a key k, else False"
1423 return key in self.__algorithm or key in self.__P
1426 "x.__repr__() <==> repr(x)"
1427 return repr(self.__A)+", "+repr(self.__P)
1430 "x.__str__() <==> str(x)"
1431 return str(self.__A)+", "+str(self.__P)
# Selects and instantiates the algorithm named by `choice`: locates the
# module file under a "daAlgorithms" directory on sys.path, imports it
# (temporarily prepending its directory to sys.path), checks it defines
# ElementaryAlgorithm, and instantiates it. The choice can be made only once.
1433 def __setAlgorithm(self, choice = None ):
1435 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1436 d'assimilation. L'argument est un champ caractère se rapportant au nom
1437 d'un algorithme réalisant l'opération sur les arguments fixes.
1440 raise ValueError("Error: algorithm choice has to be given")
1441 if self.__algorithmName is not None:
1442 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1443 daDirectory = "daAlgorithms"
1445 # Explicitly look for the complete file
1446 # ------------------------------------------
1448 for directory in sys.path:
1449 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1450 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1451 if module_path is None:
1453 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1455 # Import the complete file as a module
1456 # ------------------------------------------
# sys.path is modified then restored around the import.
1458 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1459 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1460 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1461 raise ImportError("this module does not define a valid elementary algorithm.")
1462 self.__algorithmName = str(choice)
1463 sys.path = sys_path_tmp ; del sys_path_tmp
1464 except ImportError as e:
1466 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1468 # Instantiate an object of the elementary type of the file
1469 # -------------------------------------------------
1470 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1473 def __shape_validate(self):
1475 Validation de la correspondance correcte des tailles des variables et
1476 des matrices s'il y en a.
1478 if self.__Xb is None: __Xb_shape = (0,)
1479 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1480 elif hasattr(self.__Xb,"shape"):
1481 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1482 else: __Xb_shape = self.__Xb.shape()
1483 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1485 if self.__Y is None: __Y_shape = (0,)
1486 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1487 elif hasattr(self.__Y,"shape"):
1488 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1489 else: __Y_shape = self.__Y.shape()
1490 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1492 if self.__U is None: __U_shape = (0,)
1493 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1494 elif hasattr(self.__U,"shape"):
1495 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1496 else: __U_shape = self.__U.shape()
1497 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1499 if self.__B is None: __B_shape = (0,0)
1500 elif hasattr(self.__B,"shape"):
1501 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1502 else: __B_shape = self.__B.shape()
1503 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1505 if self.__R is None: __R_shape = (0,0)
1506 elif hasattr(self.__R,"shape"):
1507 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1508 else: __R_shape = self.__R.shape()
1509 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1511 if self.__Q is None: __Q_shape = (0,0)
1512 elif hasattr(self.__Q,"shape"):
1513 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1514 else: __Q_shape = self.__Q.shape()
1515 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1517 if len(self.__HO) == 0: __HO_shape = (0,0)
1518 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1519 elif hasattr(self.__HO["Direct"],"shape"):
1520 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1521 else: __HO_shape = self.__HO["Direct"].shape()
1522 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1524 if len(self.__EM) == 0: __EM_shape = (0,0)
1525 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1526 elif hasattr(self.__EM["Direct"],"shape"):
1527 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1528 else: __EM_shape = self.__EM["Direct"].shape()
1529 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1531 if len(self.__CM) == 0: __CM_shape = (0,0)
1532 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1533 elif hasattr(self.__CM["Direct"],"shape"):
1534 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1535 else: __CM_shape = self.__CM["Direct"].shape()
1536 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1538 # Vérification des conditions
1539 # ---------------------------
1540 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1541 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1542 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1543 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1545 if not( min(__B_shape) == max(__B_shape) ):
1546 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1547 if not( min(__R_shape) == max(__R_shape) ):
1548 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1549 if not( min(__Q_shape) == max(__Q_shape) ):
1550 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1551 if not( min(__EM_shape) == max(__EM_shape) ):
1552 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1554 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1556 "Shape characteristic of observation operator (H)"+\
1557 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1558 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1560 "Shape characteristic of observation operator (H)"+\
1561 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1562 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1564 "Shape characteristic of observation operator (H)"+\
1565 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1566 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1568 "Shape characteristic of observation operator (H)"+\
1569 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1571 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1572 if self.__algorithmName in ["EnsembleBlue",]:
1573 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1574 self.__Xb = Persistence.OneVector("Background")
1575 for member in asPersistentVector:
1576 self.__Xb.store( numpy.asarray(member, dtype=float) )
1577 __Xb_shape = min(__B_shape)
1580 "Shape characteristic of a priori errors covariance matrix (B)"+\
1581 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1583 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1585 "Shape characteristic of observation errors covariance matrix (R)"+\
1586 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1588 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1590 "Shape characteristic of evolution model (EM)"+\
1591 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1593 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1595 "Shape characteristic of control model (CM)"+\
1596 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1598 if ("Bounds" in self.__P) \
1599 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1600 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1601 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1602 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1604 if ("StateBoundsForQuantiles" in self.__P) \
1605 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1606 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1607 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1608 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1612 # ==============================================================================
# NOTE(review): this region is a gap-sampled numbered listing — several
# original lines (the "def __init__" signature around 1617, some "else:"
# branches) are missing between the numbered lines below. Code bytes kept.
1613 class RegulationAndParameters(object):
# Generic interface class: holds regulation settings as a parameter dict
# (self.__P), optionally loaded from a user script.
1615 Classe générale d'interface d'action pour la régulation et ses paramètres
1618 name = "GenericRegulation",
1625 self.__name = str(name)
# Load the algorithm from the script only when not given directly.
1628 if asAlgorithm is None and asScript is not None:
1629 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1631 __Algo = asAlgorithm
# Load the parameter dict from the script only when not given directly.
1633 if asDict is None and asScript is not None:
1634 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1638 if __Dict is not None:
1639 self.__P.update( dict(__Dict) )
# The algorithm is stored as a plain string under the "Algorithm" key.
1641 if __Algo is not None:
1642 self.__P.update( {"Algorithm":str(__Algo)} )
1644 def get(self, key = None):
1645 "Vérifie l'existence d'une clé de variable ou de paramètres"
# Visible branch: direct dict lookup (raises KeyError for unknown keys).
# A key=None branch, presumably returning all parameters, is missing from
# this sampled view — TODO confirm against the full file.
1647 return self.__P[key]
1651 # ==============================================================================
# NOTE(review): gap-sampled listing — the "def __init__" signature and some
# "else:" lines are missing between the numbered lines. Code bytes kept.
1652 class DataObserver(object):
# Generic observer interface: binds a callable to one or more named
# algorithm variables via withAlgo.setObserver(...).
1654 Classe générale d'interface de type observer
1657 name = "GenericObserver",
1669 self.__name = str(name)
# Normalize the watched-variable spec into tuples self.__V (names) and
# self.__I (per-variable info strings of the same length).
1674 if onVariable is None:
1675 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1676 elif type(onVariable) in (tuple, list):
1677 self.__V = tuple(map( str, onVariable ))
1678 if withInfo is None:
# Same single info string is replicated once per watched variable.
1681 self.__I = (str(withInfo),)*len(self.__V)
1682 elif isinstance(onVariable, str):
1683 self.__V = (onVariable,)
1684 if withInfo is None:
1685 self.__I = (onVariable,)
1687 self.__I = (str(withInfo),)
1689 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable: either given directly, or compiled from a user
# script/template via UserScript + Observer2Func.
1691 if asObsObject is not None:
1692 self.__O = asObsObject
1694 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1695 __Function = Observer2Func(__FunctionText)
1696 self.__O = __Function.getfunc()
# Register the observer on each named variable; unknown names are an error.
1698 for k in range(len(self.__V)):
1701 if ename not in withAlgo:
1702 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1704 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1707 "x.__repr__() <==> repr(x)"
1708 return repr(self.__V)+"\n"+repr(self.__O)
1711 "x.__str__() <==> str(x)"
1712 return str(self.__V)+"\n"+str(self.__O)
1714 # ==============================================================================
# NOTE(review): gap-sampled listing — the "def __init__" signature and at
# least one assignment under "if asString is not None:" are missing.
1715 class UserScript(object):
# Wraps a user-supplied script text self.__F, resolved in priority order:
# explicit string, then named template, then script file import.
1717 Classe générale d'interface de type texte de script utilisateur
1720 name = "GenericUserScript",
1727 self.__name = str(name)
1729 if asString is not None:
# Template lookup is keyed both on the object name and the template name.
1731 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1732 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1733 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1734 self.__F = Templates.ObserverTemplates[asTemplate]
1735 elif asScript is not None:
1736 self.__F = Interfaces.ImportFromScript(asScript).getstring()
# repr/str both expose the raw script text.
1741 "x.__repr__() <==> repr(x)"
1742 return repr(self.__F)
1745 "x.__str__() <==> str(x)"
1746 return str(self.__F)
1748 # ==============================================================================
# NOTE(review): gap-sampled listing; "def __init__" signature and several
# accessor "def" lines are missing between the numbered lines below.
1749 class ExternalParameters(object):
# NOTE(review): this docstring looks copy-pasted from UserScript — the class
# is actually a dict-like holder of external parameters; consider fixing.
1751 Classe générale d'interface de type texte de script utilisateur
1754 name = "GenericExternalParameters",
1760 self.__name = str(name)
1763 self.updateParameters( asDict, asScript )
1765 def updateParameters(self,
1769 "Mise a jour des parametres"
# Script import is the fallback when no explicit dict is provided.
1770 if asDict is None and asScript is not None:
1771 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1775 if __Dict is not None:
1776 self.__P.update( dict(__Dict) )
1778 def get(self, key = None):
1780 return self.__P[key]
# Two key-listing accessors below return the same list; their "def" lines
# (presumably keys() and a dict-protocol variant) are missing from this view.
1782 return list(self.__P.keys())
1785 return list(self.__P.keys())
# Thin delegations to the underlying dict.
1787 def pop(self, k, d):
1788 return self.__P.pop(k, d)
1791 return self.__P.items()
1793 def __contains__(self, key=None):
1794 "D.__contains__(k) -> True if D has a key k, else False"
1795 return key in self.__P
1797 # ==============================================================================
# NOTE(review): gap-sampled listing — the "def __init__" signature, several
# "else:" branches and "try/except" lines are missing. Code bytes kept.
1798 class State(object):
# Generic state-vector interface: wraps either a single vector (ndarray
# column) or a series of vectors (Persistence.OneVector).
1800 Classe générale d'interface de type état
1803 name = "GenericVector",
1805 asPersistentVector = None,
1811 toBeChecked = False,
1814 Permet de définir un vecteur :
1815 - asVector : entrée des données, comme un vecteur compatible avec le
1816 constructeur de numpy.matrix, ou "True" si entrée par script.
1817 - asPersistentVector : entrée des données, comme une série de vecteurs
1818 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1819 type Persistence, ou "True" si entrée par script.
1820 - asScript : si un script valide est donné contenant une variable
1821 nommée "name", la variable est de type "asVector" (par défaut) ou
1822 "asPersistentVector" selon que l'une de ces variables est placée à
1824 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1825 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1826 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1827 nommée "name"), on récupère les colonnes et on les range ligne après
1828 ligne (colMajor=False, par défaut) ou colonne après colonne
1829 (colMajor=True). La variable résultante est de type "asVector" (par
1830 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1833 self.__name = str(name)
1834 self.__check = bool(toBeChecked)
1838 self.__is_vector = False
1839 self.__is_series = False
# Input resolution: script, then data file, then direct arguments.
1841 if asScript is not None:
1842 __Vector, __Series = None, None
1843 if asPersistentVector:
1844 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1846 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1847 elif asDataFile is not None:
1848 __Vector, __Series = None, None
1849 if asPersistentVector:
1850 if colNames is not None:
1851 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1853 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Orientation fixup: npz files are already column-major, text files are not,
# so the transpose is applied in exactly one of the two cases.
1854 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1855 __Series = numpy.transpose(__Series)
1856 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1857 __Series = numpy.transpose(__Series)
1859 if colNames is not None:
1860 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1862 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1864 __Vector = numpy.ravel(__Vector, order = "F")
1866 __Vector = numpy.ravel(__Vector, order = "C")
1868 __Vector, __Series = asVector, asPersistentVector
# Single-vector case: stored as a float column vector (n,1).
1870 if __Vector is not None:
1871 self.__is_vector = True
1872 if isinstance(__Vector, str):
1873 __Vector = PlatformInfo.strvect2liststr( __Vector )
1874 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1875 self.shape = self.__V.shape
1876 self.size = self.__V.size
# Series case: stored member-by-member in a Persistence.OneVector.
1877 elif __Series is not None:
1878 self.__is_series = True
1879 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1880 self.__V = Persistence.OneVector(self.__name)
1881 if isinstance(__Series, str):
1882 __Series = PlatformInfo.strmatrix2liststr(__Series)
1883 for member in __Series:
1884 if isinstance(member, str):
1885 member = PlatformInfo.strvect2liststr( member )
1886 self.__V.store(numpy.asarray( member, dtype=float ))
# shape may be an attribute or a method on the stored object.
1889 if isinstance(self.__V.shape, (tuple, list)):
1890 self.shape = self.__V.shape
1892 self.shape = self.__V.shape()
1893 if len(self.shape) == 1:
1894 self.shape = (self.shape[0],1)
1895 self.size = self.shape[0] * self.shape[1]
1898 "The %s object is improperly defined or undefined,"%self.__name+\
1899 " it requires at minima either a vector, a list/tuple of"+\
1900 " vectors or a persistent object. Please check your vector input.")
1902 if scheduledBy is not None:
1903 self.__T = scheduledBy
# Accessor: optionally returns (value, scheduler); other branches missing
# from this sampled view.
1905 def getO(self, withScheduler=False):
1907 return self.__V, self.__T
1908 elif self.__T is None:
# Internal-type predicates and standard repr/str delegating to the payload.
1914 "Vérification du type interne"
1915 return self.__is_vector
1918 "Vérification du type interne"
1919 return self.__is_series
1922 "x.__repr__() <==> repr(x)"
1923 return repr(self.__V)
1926 "x.__str__() <==> str(x)"
1927 return str(self.__V)
1929 # ==============================================================================
# NOTE(review): gap-sampled listing — "def __init__" signature, some "else:"
# lines, shape/size assignments and "try/except" lines are missing.
1930 class Covariance(object):
# Generic covariance interface: the payload self.__C is one of a scalar
# (eye multiplier), a 1D variance vector (diagonal), a full numpy.matrix,
# or a user object exposing a matrix-like protocol.
1932 Classe générale d'interface de type covariance
1935 name = "GenericCovariance",
1936 asCovariance = None,
1937 asEyeByScalar = None,
1938 asEyeByVector = None,
1941 toBeChecked = False,
1944 Permet de définir une covariance :
1945 - asCovariance : entrée des données, comme une matrice compatible avec
1946 le constructeur de numpy.matrix
1947 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1948 multiplicatif d'une matrice de corrélation identité, aucune matrice
1949 n'étant donc explicitement à donner
1950 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1951 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1952 n'étant donc explicitement à donner
1953 - asCovObject : entrée des données comme un objet python, qui a les
1954 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1955 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1956 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1957 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1958 pleine doit être vérifié
1960 self.__name = str(name)
1961 self.__check = bool(toBeChecked)
1964 self.__is_scalar = False
1965 self.__is_vector = False
1966 self.__is_matrix = False
1967 self.__is_object = False
# When a script is given, exactly one of the four inputs is imported;
# the selecting conditions are missing from this sampled view.
1969 if asScript is not None:
1970 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1972 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1974 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1976 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1978 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1980 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar case: a string input is parsed, and only a size-1 value is legal.
1982 if __Scalar is not None:
1983 if isinstance(__Scalar, str):
1984 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1985 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1986 if numpy.array(__Scalar).size != 1:
1988 " The diagonal multiplier given to define a sparse matrix is"+\
1989 " not a unique scalar value.\n Its actual measured size is"+\
1990 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
1991 self.__is_scalar = True
# abs() silently forces non-negativity; __validate still rejects <= 0.
1992 self.__C = numpy.abs( float(__Scalar) )
1995 elif __Vector is not None:
1996 if isinstance(__Vector, str):
1997 __Vector = PlatformInfo.strvect2liststr( __Vector )
1998 self.__is_vector = True
1999 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
2000 self.shape = (self.__C.size,self.__C.size)
2001 self.size = self.__C.size**2
2002 elif __Matrix is not None:
2003 self.__is_matrix = True
2004 self.__C = numpy.matrix( __Matrix, float )
2005 self.shape = self.__C.shape
2006 self.size = self.__C.size
# Object case: duck-type check of the mandatory matrix-like protocol.
2007 elif __Object is not None:
2008 self.__is_object = True
2010 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2011 if not hasattr(self.__C,at):
2012 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2013 if hasattr(self.__C,"shape"):
2014 self.shape = self.__C.shape
2017 if hasattr(self.__C,"size"):
2018 self.size = self.__C.size
# Internal sanity check: squareness and (symmetric) positive-definiteness.
2026 def __validate(self):
2028 if self.__C is None:
2029 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2030 if self.ismatrix() and min(self.shape) != max(self.shape):
2031 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2032 if self.isobject() and min(self.shape) != max(self.shape):
2033 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2034 if self.isscalar() and self.__C <= 0:
2035 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2036 if self.isvector() and (self.__C <= 0).any():
2037 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check via Cholesky, run when asked or when logging is verbose; the
# try/except lines around the factorization are missing from this view.
2038 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2040 numpy.linalg.cholesky( self.__C )
2042 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2043 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2047 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# NOTE(review): gap-sampled listing — the "def" lines of the four type
# predicates and of getI/getT/cholesky/sqrtm and several "if/else:" lines
# are missing between the numbered lines below. Code bytes kept.
2050 "Vérification du type interne"
2051 return self.__is_scalar
2054 "Vérification du type interne"
2055 return self.__is_vector
2058 "Vérification du type interne"
2059 return self.__is_matrix
2062 "Vérification du type interne"
2063 return self.__is_object
# Inverse: each storage kind maps to the matching Covariance constructor.
2068 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2069 elif self.isvector():
2070 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2071 elif self.isscalar():
2072 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2073 elif self.isobject() and hasattr(self.__C,"getI"):
2074 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2076 return None # Indispensable
# Transpose (a no-op for scalar/vector storages by construction).
2081 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2082 elif self.isvector():
2083 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2084 elif self.isscalar():
2085 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2086 elif self.isobject() and hasattr(self.__C,"getT"):
2087 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2089 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# Cholesky factor; sqrt suffices on the diagonal-only storages.
2092 "Décomposition de Cholesky"
2094 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2095 elif self.isvector():
2096 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2097 elif self.isscalar():
2098 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2099 elif self.isobject() and hasattr(self.__C,"cholesky"):
2100 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2102 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
2104 def choleskyI(self):
2105 "Inversion de la décomposition de Cholesky"
2107 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2108 elif self.isvector():
2109 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2110 elif self.isscalar():
2111 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2112 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2113 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2115 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# Matrix square root via scipy.linalg.sqrtm, real part kept.
2118 "Racine carrée matricielle"
2121 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2122 elif self.isvector():
2123 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2124 elif self.isscalar():
2125 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2126 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2127 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2129 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2132 "Inversion de la racine carrée matricielle"
2135 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2136 elif self.isvector():
2137 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2138 elif self.isscalar():
2139 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2140 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2141 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2143 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag/trace/asfullmatrix need an explicit msize in the scalar case since
# no dimension is stored with the scalar payload.
2145 def diag(self, msize=None):
2146 "Diagonale de la matrice"
2148 return numpy.diag(self.__C)
2149 elif self.isvector():
2151 elif self.isscalar():
2153 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2155 return self.__C * numpy.ones(int(msize))
2156 elif self.isobject() and hasattr(self.__C,"diag"):
2157 return self.__C.diag()
2159 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2161 def trace(self, msize=None):
2162 "Trace de la matrice"
2164 return numpy.trace(self.__C)
2165 elif self.isvector():
2166 return float(numpy.sum(self.__C))
2167 elif self.isscalar():
2169 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2171 return self.__C * int(msize)
2172 elif self.isobject():
2173 return self.__C.trace()
2175 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2177 def asfullmatrix(self, msize=None):
2180 return numpy.asarray(self.__C, dtype=float)
2181 elif self.isvector():
2182 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2183 elif self.isscalar():
2185 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2187 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2188 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2189 return self.__C.asfullmatrix()
2191 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2193 def assparsematrix(self):
2201 "x.__repr__() <==> repr(x)"
2202 return repr(self.__C)
2205 "x.__str__() <==> str(x)"
2206 return str(self.__C)
2208 def __add__(self, other):
2209 "x.__add__(y) <==> x+y"
2210 if self.ismatrix() or self.isobject():
2211 return self.__C + numpy.asmatrix(other)
2212 elif self.isvector() or self.isscalar():
# NOTE(review): numpy.asarray does not copy an ndarray input, so the
# in-place "+=" below mutates the caller's array — verify this aliasing is
# intended before relying on "other" afterwards.
2213 _A = numpy.asarray(other)
2214 if len(_A.shape) == 1:
# NOTE(review): the [::2] step on a 1D operand looks suspicious (hits every
# other element, not a diagonal) — TODO confirm against the full file.
2215 _A.reshape((-1,1))[::2] += self.__C
2217 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2218 return numpy.asmatrix(_A)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Reflected addition is deliberately unsupported: fail loudly instead of
    # letting Python fall back to a silent NotImplemented.
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    # Full-matrix or object storage: delegate to the payload's own operator.
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Work on a private copy: numpy.asarray would alias "other" when it
        # is already an ndarray, and the in-place diagonal assignment below
        # would then clobber the caller's data (bug fix: copy first).
        _A = numpy.array(other)
        # Replace the main-diagonal entries of the (assumed 2D) operand by
        # (diag(self) - diag(other)); the flat stride shape[1]+1 walks the
        # main diagonal of a row-major square layout.
        _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
        return numpy.asmatrix(_A)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Reflected subtraction is deliberately unsupported: fail loudly instead
    # of letting Python fall back to a silent NotImplemented.
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
2238 "x.__neg__() <==> -x"
2241 def __matmul__(self, other):
2242 "x.__mul__(y) <==> x@y"
2243 if self.ismatrix() and isinstance(other, (int, float)):
2244 return numpy.asarray(self.__C) * other
2245 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2246 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2247 return numpy.ravel(self.__C @ numpy.ravel(other))
2248 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2249 return numpy.asarray(self.__C) @ numpy.asarray(other)
2251 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2252 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2253 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2254 return numpy.ravel(self.__C) * numpy.ravel(other)
2255 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2256 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2258 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2259 elif self.isscalar() and isinstance(other,numpy.matrix):
2260 return numpy.asarray(self.__C * other)
2261 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2262 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2263 return self.__C * numpy.ravel(other)
2265 return self.__C * numpy.asarray(other)
2266 elif self.isobject():
2267 return self.__C.__matmul__(other)
2269 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2271 def __mul__(self, other):
2272 "x.__mul__(y) <==> x*y"
2273 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2274 return self.__C * other
2275 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2276 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2277 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2278 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2279 return self.__C * numpy.asmatrix(other)
2282 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2283 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2284 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2285 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2286 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2287 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2290 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2291 elif self.isscalar() and isinstance(other,numpy.matrix):
2292 return self.__C * other
2293 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2294 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2295 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2297 return self.__C * numpy.asmatrix(other)
2298 elif self.isobject():
2299 return self.__C.__mul__(other)
2301 raise NotImplementedError(
2302 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2304 def __rmatmul__(self, other):
2305 "x.__rmul__(y) <==> y@x"
2306 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2307 return other * self.__C
2308 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2309 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2310 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2311 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2312 return numpy.asmatrix(other) * self.__C
2315 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2316 elif self.isvector() and isinstance(other,numpy.matrix):
2317 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2318 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2319 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2320 return numpy.asmatrix(numpy.array(other) * self.__C)
2323 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2324 elif self.isscalar() and isinstance(other,numpy.matrix):
2325 return other * self.__C
2326 elif self.isobject():
2327 return self.__C.__rmatmul__(other)
2329 raise NotImplementedError(
2330 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2332 def __rmul__(self, other):
2333 "x.__rmul__(y) <==> y*x"
2334 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2335 return other * self.__C
2336 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2337 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2338 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2339 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2340 return numpy.asmatrix(other) * self.__C
2343 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2344 elif self.isvector() and isinstance(other,numpy.matrix):
2345 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2346 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2347 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2348 return numpy.asmatrix(numpy.array(other) * self.__C)
2351 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2352 elif self.isscalar() and isinstance(other,numpy.matrix):
2353 return other * self.__C
2354 elif self.isscalar() and isinstance(other,float):
2355 return other * self.__C
2356 elif self.isobject():
2357 return self.__C.__rmul__(other)
2359 raise NotImplementedError(
2360 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2363 "x.__len__() <==> len(x)"
2364 return self.shape[0]
2366 # ==============================================================================
# NOTE(review): gap-sampled listing — the exec/compile of self.__corps inside
# func() and the "def getfunc" line are missing from this view.
2367 class Observer2Func(object):
# Builds an observer callable from its source text (self.__corps).
2369 Création d'une fonction d'observateur a partir de son texte
2371 def __init__(self, corps=""):
2372 self.__corps = corps
2373 def func(self,var,info):
2374 "Fonction d'observation"
# Accessor returning the callable itself, used by DataObserver.
2377 "Restitution du pointeur de fonction dans l'objet"
2380 # ==============================================================================
# NOTE(review): gap-sampled listing — the dict openings for self.__viewers /
# self.__loaders, an "elif"/"if" around the switchoff handling, and "else:"
# lines in dump/load are missing from this view. Code bytes kept.
2381 class CaseLogger(object):
# Records case-building commands and replays them through pluggable
# viewer (dump) and loader (load) formatters.
2383 Conservation des commandes de création d'un cas
2385 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2386 self.__name = str(__name)
2387 self.__objname = str(__objname)
2388 self.__logSerie = []
2389 self.__switchoff = False
# Built-in output formatters, keyed by format name.
2391 "TUI" :Interfaces._TUIViewer,
2392 "SCD" :Interfaces._SCDViewer,
2393 "YACS":Interfaces._YACSViewer,
2394 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2395 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2396 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formatters.
2399 "TUI" :Interfaces._TUIViewer,
2400 "COM" :Interfaces._COMViewer,
# User-supplied formatters extend (and may override) the built-ins.
2402 if __addViewers is not None:
2403 self.__viewers.update(dict(__addViewers))
2404 if __addLoaders is not None:
2405 self.__loaders.update(dict(__addLoaders))
2407 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2408 "Enregistrement d'une commande individuelle"
# Commands are appended only while logging is enabled (__switchoff False);
# the "self" key is stripped from the recorded keyword list.
2409 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2410 if "self" in __keys: __keys.remove("self")
2411 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2413 self.__switchoff = True
2415 self.__switchoff = False
2417 def dump(self, __filename=None, __format="TUI", __upa=""):
2418 "Restitution normalisée des commandes"
2419 if __format in self.__viewers:
2420 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2422 raise ValueError("Dumping as \"%s\" is not available"%__format)
2423 return __formater.dump(__filename, __upa)
2425 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2426 "Chargement normalisé des commandes"
2427 if __format in self.__loaders:
2428 __formater = self.__loaders[__format]()
2430 raise ValueError("Loading as \"%s\" is not available"%__format)
2431 return __formater.load(__filename, __content, __object)
2433 # ==============================================================================
# NOTE(review): gap-sampled listing — the "def MultiFonction(" line, the
# multiprocessing-availability guard, job construction (_jobs) and the final
# "return __multiHX" are missing from this view. Code bytes kept.
# Maps _sFunction over an ordered series of input vectors, optionally in
# parallel via a multiprocessing.Pool of _mpWorkers processes.
2436 _extraArguments = None,
2437 _sFunction = lambda x: x,
2442 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2443 correspondante de valeurs de la fonction en argument
2445 # Vérifications et définitions initiales
2446 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2447 if not PlatformInfo.isIterable( __xserie ):
2448 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or None requests fall back (branch body missing
# from this view); otherwise the explicit count is used.
2450 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2453 __mpWorkers = int(_mpWorkers)
2455 import multiprocessing
2466 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
# Pool used as a context manager so workers are cleaned up on exit.
2467 with multiprocessing.Pool(__mpWorkers) as pool:
2468 __multiHX = pool.map( _sFunction, _jobs )
2471 # logging.debug("MULTF Internal multiprocessing calculation end")
2473 # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential fallback: extra arguments are spread positionally for
# list/tuple/map, or as keywords for dict.
2475 if _extraArguments is None:
2476 for __xvalue in __xserie:
2477 __multiHX.append( _sFunction( __xvalue ) )
2478 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2479 for __xvalue in __xserie:
2480 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2481 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2482 for __xvalue in __xserie:
2483 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2485 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2486 # logging.debug("MULTF Internal monoprocessing calculation end")
2488 # logging.debug("MULTF Internal multifonction calculations end")
2491 # ==============================================================================
# Self-test entry point: prints a banner when the module is run directly.
2492 if __name__ == "__main__":
2493 print('\n AUTODIAGNOSTIC\n')