1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
class CacheManager(object):
    """
    Generic cache manager for operator evaluations.

    It stores (point, value) pairs per operator name, and allows a
    previously computed value to be retrieved when a new evaluation point
    is numerically redundant with a stored one, within a relative
    tolerance scaled by the norm of the stored point.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                ):
        """
        Tolerance characteristics can be modified at creation.

        - toleranceInRedundancy : relative tolerance used to declare two
          evaluation points redundant
        - lenghtOfRedundancy : maximal number of stored pairs; a negative
          value means the cache is sized automatically at first storage
        """
        self.__tolerBP   = float(toleranceInRedundancy)
        self.__lenghtOR  = int(lenghtOfRedundancy)
        self.__initlnOR  = self.__lenghtOR
        self.__seenNames = []
        self.__enabled   = True
        self.clearCache()

    def clearCache(self):
        "Empty the cache content"
        self.__listOPCV  = []
        self.__seenNames = []

    def wasCalculatedIn(self, xValue, oName="" ):
        "Check the existence of a stored calculation matching the given value"
        __alc = False
        __HxV = None
        if self.__enabled:
            # Backward search: the most recently stored points are tested first
            for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
                if not hasattr(xValue, 'size'):
                    pass        # only array-like points can be compared
                elif (str(oName) != self.__listOPCV[i][3]):
                    pass        # entry stored for another operator
                elif (xValue.size != self.__listOPCV[i][0].size):
                    pass        # incompatible point sizes
                elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
                    pass        # cheap first-component test rejects the candidate
                elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                    # Full norm test: points are redundant, reuse the stored value
                    __alc = True
                    __HxV = self.__listOPCV[i][1]
                    break
        return __alc, __HxV

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Store, for an operator o, a calculation Hx matching the value x"
        if self.__lenghtOR < 0:
            # First storage: size the cache from the dimension of the point
            self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list for a new operator
            self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lenghtOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lenghtOR:
            self.__listOPCV.pop(0)              # drop the oldest entries first
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),     # 0 Previous point
            copy.copy(HxValue),                 # 1 Previous value
            numpy.linalg.norm(xValue),          # 2 Norm
            str(oName),                         # 3 Operator name
            ) )

    def disable(self):
        "Deactivate the cache (no lookup can succeed while disabled)"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0
        self.__enabled  = False

    def enable(self):
        "Reactivate the cache with its previous capacity"
        self.__lenghtOR = self.__initlnOR
        self.__enabled  = True
114 # ==============================================================================
class Operator(object):
    """
    Generic interface class for a simple operator.

    The operator is defined either by a Python function / multi-function
    (fromMethod) or by a matrix (fromMatrix). Evaluations are counted both
    per instance and globally, and redundant calculations can be avoided
    through a cache shared by all operator instances.
    """
    NbCallsAsMatrix = 0   # Global count of matrix applications
    NbCallsAsMethod = 0   # Global count of function evaluations
    NbCallsOfCached = 0   # Global count of cache hits
    CM = CacheManager()   # Cache shared by every Operator instance

    def __init__(self,
        name                 = "GenericOperator",
        fromMethod           = None,
        fromMatrix           = None,
        avoidingRedundancy   = True,
        reducingMemoryUse    = False,
        inputAsMultiFunction = False,
        enableMultiProcess   = False,
        extraArguments       = None,
        ):
        """
        Build such an object by providing, with one of the two keywords,
        either a function / multi-function, or a matrix.

        Arguments:
        - name : operator name
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant calculations
        - reducingMemoryUse : boolean forcing (or not) calculations that use
          less memory
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name      = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM   = bool( reducingMemoryUse )
        self.__avoidRC   = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the mono-function so it is always driven as a multi-function
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the calculation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Activate the calculation cache, if allowed for this operator"
        if self.__avoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        "Return the operator type"
        return self.__Type

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of xValue
        arguments. Each argument is expected to already be of the right type.

        Series arguments:
        - xValue : argument suitable for applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicate whether arguments are mono or multi-valued
        """
        if argsAsSerie:
            _xValue = xValue
            _HValue = HValue
        else:
            _xValue = (xValue,)
            if HValue is not None:
                _HValue = (HValue,)
            else:
                _HValue = HValue
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        #
        if _HValue is not None:
            # Precomputed values: just register them (and cache them)
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            _HxValue = []
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                if self.__avoidRC:
                    Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
        else:
            _HxValue = []
            _xserie  = []
            _hindex  = []
            for i, xv in enumerate(_xValue):
                if self.__avoidRC:
                    __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                else:
                    __alreadyCalculated = False
                #
                if __alreadyCalculated:
                    self.__addOneCacheCall()
                    _hv = __HxV
                else:
                    if self.__Matrix is not None:
                        self.__addOneMatrixCall()
                        _hv = self.__Matrix @ numpy.ravel(xv)
                    else:
                        self.__addOneMethodCall()
                        # Defer function evaluation: gather points to run one
                        # multi-function call, remembering their positions
                        _xserie.append( xv )
                        _hindex.append(  i )
                        _hv = None
                _HxValue.append( _hv )
            #
            if len(_xserie)>0 and self.__Matrix is None:
                if self.__extraArgs is None:
                    _hserie = self.__Method( _xserie ) # Calcul MF
                else:
                    _hserie = self.__Method( _xserie, self.__extraArgs ) # Calcul MF
                if not hasattr(_hserie, "pop"):
                    raise TypeError(
                        "The user input multi-function doesn't seem to return a"+\
                        " result sequence, behaving like a mono-function. It has"+\
                        " to be checked." )
                for i in _hindex:
                    _xv = _xserie.pop(0)
                    _hv = _hserie.pop(0)
                    _HxValue[i] = _hv
                    if self.__avoidRC:
                        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to pairs
        (xValue, uValue). Each argument is expected to already be of the
        right type. If uValue is None, the operator is assumed to apply to
        xValue only.

        Arguments:
        - paires : the arguments by pair are:
            - xValue : X argument suitable for applying the operator
            - uValue : U argument suitable for applying the operator
        - argsAsSerie : indicate whether the argument is mono or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        #
        if self.__Matrix is not None:
            # A matrix operator ignores the control argument
            _HxValue = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
        else:
            _xuArgs = []
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                else:
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # Calcul MF
            else:
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Calcul MF
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Return the result of applying the operator to a series of xValue
        arguments, knowing that the operator is valid at xNominal. Each
        argument is expected to already be of the right type. If the
        operator is linear (a matrix), it is valid at any nominal point so
        xNominal can be anything. There is a single pair by default, and
        argsAsSerie indicates a multi-pair argument.

        Arguments:
        - paires : the arguments by pair are:
            - xNominal : series of arguments giving the point where the
              operator is built in order to be applied afterwards
            - xValue : series of arguments suitable for applying the operator
        - argsAsSerie : indicate whether the argument is mono or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        #
        if self.__Matrix is not None:
            _HxValue = []
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
        else:
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # Calcul MF
            else:
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Calcul MF
        #
        if returnSerieAsArrayMatrix:
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        #
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Return the operator in matrix form; a function-defined operator
        requires an operating point at which to evaluate it.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
            mValue = []
            if argsAsSerie:
                self.__addOneMethodCall( len(ValueForMethodForm) )
                for _vfmf in ValueForMethodForm:
                    mValue.append( self.__Method(((_vfmf, None),)) )
            else:
                self.__addOneMethodCall()
                mValue = self.__Method(((ValueForMethodForm, None),))
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        #
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    def shape(self):
        """
        Return the shape as a numpy tuple if the operator is available in
        matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations, as a tuple of local then
        global counters, or one of them when `which` is an index.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix application"
        self.__NbCallsAsMatrix   += 1 # Decompte local
        Operator.NbCallsAsMatrix += 1 # Decompte global

    def __addOneMethodCall(self, nb = 1):
        "Count one (or nb) function evaluation(s)"
        self.__NbCallsAsMethod   += nb # Decompte local
        Operator.NbCallsAsMethod += nb # Decompte global

    def __addOneCacheCall(self):
        "Count one cache hit"
        self.__NbCallsOfCached   += 1 # Decompte local
        Operator.NbCallsOfCached += 1 # Decompte global
413 # ==============================================================================
class FullOperator(object):
    """
    Generic interface class for a full operator
    (Direct, Linear Tangent, Adjoint).
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,
                 asOneFunction    = None, # 1 Fonction
                 asThreeFunctions = None, # 3 Fonctions in a dictionary
                 asScript         = None, # 1 or 3 Fonction(s) by script
                 asDict           = None, # Parameters
                 appliedInX       = None,
                 extraArguments   = None,
                 performancePrf   = None,
                 inputAsMF        = False,# Fonction(s) as Multi-Functions
                 scheduledBy      = None,
                 toBeChecked      = False,
                 ):
        """
        Build the triple of Direct/Tangent/Adjoint operators from either a
        matrix, one function (with approximated derivatives), three
        functions, or a script providing them, with optional parameters
        given in asDict.
        """
        self.__name      = str(name)
        self.__check     = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        self.__FO        = {}
        #
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            # Derivatives and evaluation multi-processing are exclusive
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"]  = False
        if "withIncrement" in __Parameters: # Temporary backward compatibility
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # Default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        #
        if asScript is not None:
            # The script provides either the matrix or the function(s)
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError(
                        "The functions has to be given in a dictionnary which have either"+\
                        " 1 key (\"Direct\") or"+\
                        " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # A missing Direct falls back on the Tangent function
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Finite-difference approximation of the Tangent/Adjoint operators
            if "CenteredFiniteDifference"   not in __Function: __Function["CenteredFiniteDifference"]   = False
            if "DifferentialIncrement"      not in __Function: __Function["DifferentialIncrement"]      = 0.01
            if "withdX"                     not in __Function: __Function["withdX"]                     = None
            if "withReducingMemoryUse"      not in __Function: __Function["withReducingMemoryUse"]      = __reduceM
            if "withAvoidingRedundancy"     not in __Function: __Function["withAvoidingRedundancy"]     = __avoidRC
            if "withToleranceInRedundancy"  not in __Function: __Function["withToleranceInRedundancy"]  = 1.e-18
            if "withLenghtOfRedundancy"     not in __Function: __Function["withLenghtOfRedundancy"]     = -1
            if "NumberOfProcesses"          not in __Function: __Function["NumberOfProcesses"]          = None
            if "withmfEnabled"              not in __Function: __Function["withmfEnabled"]              = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                name                  = self.__name,
                Function              = __Function["Direct"],
                centeredDF            = __Function["CenteredFiniteDifference"],
                increment             = __Function["DifferentialIncrement"],
                dX                    = __Function["withdX"],
                extraArguments        = self.__extraArgs,
                reducingMemoryUse     = __Function["withReducingMemoryUse"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers             = __Function["NumberOfProcesses"],
                mfEnabled             = __Function["withmfEnabled"],
                )
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # The three operators are explicitly given as functions
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator: Tangent is the matrix, Adjoint its transpose
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                name = self.__name,
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
        else:
            raise ValueError(
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a matrix, a Direct operator for"+\
                " approximate derivatives or a Tangent/Adjoint operators pair."+\
                " Please check your operator input.")
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of the built operators"
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
649 # ==============================================================================
650 class Algorithm(object):
652 Classe générale d'interface de type algorithme
654 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
655 d'assimilation, en fournissant un container (dictionnaire) de variables
656 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
658 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
660 def __init__(self, name):
662 L'initialisation présente permet de fabriquer des variables de stockage
663 disponibles de manière générique dans les algorithmes élémentaires. Ces
664 variables de stockage sont ensuite conservées dans un dictionnaire
665 interne à l'objet, mais auquel on accède par la méthode "get".
667 Les variables prévues sont :
668 - APosterioriCorrelations : matrice de corrélations de la matrice A
669 - APosterioriCovariance : matrice de covariances a posteriori : A
670 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
671 - APosterioriVariances : vecteur des variances de la matrice A
672 - Analysis : vecteur d'analyse : Xa
673 - BMA : Background moins Analysis : Xa - Xb
674 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
675 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
676 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
677 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
678 - CostFunctionJo : partie observations de la fonction-coût : Jo
679 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
680 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
681 - CurrentOptimum : état optimal courant lors d'itérations
682 - CurrentState : état courant lors d'itérations
683 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
684 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
685 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
686 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
687 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
688 - Innovation : l'innovation : d = Y - H(X)
689 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
690 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
691 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
692 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
693 - KalmanGainAtOptimum : gain de Kalman à l'optimum
694 - MahalanobisConsistency : indicateur de consistance des covariances
695 - OMA : Observation moins Analyse : Y - Xa
696 - OMB : Observation moins Background : Y - Xb
697 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
698 - ForecastState : état prédit courant lors d'itérations
699 - Residu : dans le cas des algorithmes de vérification
700 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
701 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
702 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
703 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
704 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
705 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
706 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
707 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
708 On peut rajouter des variables à stocker dans l'initialisation de
709 l'algorithme élémentaire qui va hériter de cette classe
711 logging.debug("%s Initialisation", str(name))
712 self._m = PlatformInfo.SystemUsage()
714 self._name = str( name )
715 self._parameters = {"StoreSupplementaryCalculations":[]}
716 self.__internal_state = {}
717 self.__required_parameters = {}
718 self.__required_inputs = {
719 "RequiredInputValues":{"mandatory":(), "optional":()},
720 "ClassificationTags":[],
722 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
723 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
724 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
725 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
727 self.StoredVariables = {}
728 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
729 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
730 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
731 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
732 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
733 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
734 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
735 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
736 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
737 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
738 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
739 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
740 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
741 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
742 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
743 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
744 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
745 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
746 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
747 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
748 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
749 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
750 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
751 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
752 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
753 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
754 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
755 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
756 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
757 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
758 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
759 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
760 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
761 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
762 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
763 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
764 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
765 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
766 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
767 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
768 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
769 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
770 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
772 for k in self.StoredVariables:
773 self.__canonical_stored_name[k.lower()] = k
775 for k, v in self.__variable_names_not_public.items():
776 self.__canonical_parameter_name[k.lower()] = k
777 self.__canonical_parameter_name["algorithm"] = "Algorithm"
778 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        """
        Pre-run step: store the parameters, then check and log every
        calculation input (vectors, covariances, operators, bounds,
        initialization point) against the mandatory/optional declarations.

        NOTE(review): some control-flow lines of this method (if/else/try
        guards) are not visible in this excerpt.
        """
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        #
        # Update the internal parameters with the content of Parameters,
        # taking the default values for all those not defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        #
        # Corrections and complements of the vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Check/log one input vector against the mandatory/optional declarations
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                        "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
                        self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        #
        # Corrections and complements of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Check/log one error covariance matrix against the declarations
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        #
        # Corrections and complements of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Check/log one operator (given as a possibly empty dict) against the declarations
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        #
        # Corrections and complements of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) \
            and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
            and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Caution: unlike Bounds, no default to None, otherwise it would be impossible to be without bounds
        #
        # Corrections and complements of the initialization in X
        if "InitializationPoint" in self._parameters:
            if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                    raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                        %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
                self._parameters["InitializationPoint"] = numpy.ravel(Xb)
            if self._parameters["InitializationPoint"] is None:
                raise ValueError("Forced initial point can not be set without any given Background or required value")
        #
        # Correction to work around a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        #
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    def _post_run(self,_oH=None):
        """
        Post-run step: derive requested a posteriori quantities (variances,
        standard deviations, correlations) from each stored a posteriori
        covariance, then log operator call counts and resource usage.

        NOTE(review): the opening "logging.debug(" lines of two split calls
        are not visible in this excerpt.
        """
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by the inverse standard deviations to get correlations
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
                "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
                "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
                self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
924 def _toStore(self, key):
925 "True if in StoreSupplementaryCalculations, else False"
926 return key in self._parameters["StoreSupplementaryCalculations"]
    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given.
        The variables are returned directly as objects, so the individual
        access methods are the ones of the persistence classes.
        """
        # NOTE(review): the "if key is not None:"/"else:" guard lines are not
        # visible in this excerpt
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Keys are matched case-insensitively through __canonical_stored_name
        if key is None or key.lower() not in self.__canonical_stored_name:
            # NOTE(review): the "return False" and "else:" lines are not
            # visible in this excerpt
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): fragment of Algorithm.keys() — its "def" line is not
        # visible in this excerpt
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): fragment of Algorithm.pop() — its "def" line and the
        # surrounding else/try structure are not visible in this excerpt
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation operation in
        its most natural mathematical form. Abstract here: concrete
        algorithm subclasses override this method.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self,
        # NOTE(review): the remaining keyword arguments of this signature
        # (name, default, typecast, minval, maxval, listval, listadv,
        # message, oldname) are not visible in this excerpt
        """
        Define in the algorithm a required parameter and its default
        characteristics (typecast, bounds, allowed values, message,
        deprecated old name).
        """
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
            "listval" : listval,
            "listadv" : listadv,
            "message" : message,
            "oldname" : oldname,
        self.__canonical_parameter_name[name.lower()] = name
        if oldname is not None:
            # Deprecated name is still accepted and mapped to the new one
            self.__canonical_parameter_name[oldname.lower()] = name # Conversion
            self.__replace_by_the_new_name[oldname.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of the required-parameter names, or directly
        the dictionary of the required parameters when details are asked.
        """
        # NOTE(review): the "if noDetails:"/"else:" guard lines are not
        # visible in this excerpt
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way:
        apply the declared typecast, then check min/max bounds and the
        allowed-value lists (listval/listadv).

        NOTE(review): a few branch lines (else/try guards, the final
        return) of this method are not visible in this excerpt.
        """
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        #
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                    __val = typecast( value )
                    raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        #
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            # Sequence values are checked element by element
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    if listval is not None and v in listval: continue
                    elif listadv is not None and v in listadv: continue
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
1062 def requireInputArguments(self, mandatory=(), optional=()):
1064 Permet d'imposer des arguments de calcul requis en entrée.
1066 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
1067 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
1069 def getInputArguments(self):
1071 Permet d'obtenir les listes des arguments de calcul requis en entrée.
1073 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
1075 def setAttributes(self, tags=()):
1077 Permet d'adjoindre des attributs comme les tags de classification.
1078 Renvoie la liste actuelle dans tous les cas.
1080 self.__required_inputs["ClassificationTags"].extend( tags )
1081 return self.__required_inputs["ClassificationTags"]
    def __setParameters(self, fromDico={}, reset=False):
        """
        Store the received parameters into the internal dictionary, mapping
        user keys to their canonical names, warning about deprecated names,
        and applying defaults for parameters not given.

        NOTE(review): the "elif reset:"/"else:" lines of the final loop are
        not visible in this excerpt.
        """
        self._parameters.update( fromDico )
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        #
        # Warn (FutureWarning) about deprecated parameter names
        for k in __inverse_fromDico_keys.values():
            if k.lower() in self.__replace_by_the_new_name:
                __newk = self.__replace_by_the_new_name[k.lower()]
                __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
                __msg += " Please update your code."
                warnings.warn(__msg, FutureWarning, stacklevel=50)
        #
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            # Long values are only logged by their length
            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1114 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1116 Permet de stocker des variables nommées constituant l'état interne
1118 if reset: # Vide le dictionnaire préalablement
1119 self.__internal_state = {}
1120 if key is not None and value is not None:
1121 self.__internal_state[key] = value
1122 self.__internal_state.update( dict(fromDico) )
    def _getInternalState(self, key=None):
        """
        Return an internal state as a dictionary of named variables, or the
        single value stored under ``key`` when it exists.
        """
        # NOTE(review): the "else:" line is not visible in this excerpt
        if key is not None and key in self.__internal_state:
            return self.__internal_state[key]
            return self.__internal_state
    def _getTimeState(self, reset=False):
        """
        Initialise or return the calculation time (cpu/elapsed) in seconds,
        measured from the reference points taken at the last reset.
        """
        # NOTE(review): the "if reset:"/"else:" guard lines are not visible
        # in this excerpt
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
        # NOTE(review): the final "else:" line and the return of __SC (with
        # __SR when withReason is set) are not visible in this excerpt
1160 # ==============================================================================
class PartialAlgorithm(object):
    """
    Class mimicking "Algorithm" from the storage point of view, but without
    any advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to the ones of the "Algorithm" class.
    """
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        # The minimal set of storable variables, built from persistence objects
        self.StoredVariables = {}
        for __v, __c in (
            ("Analysis", Persistence.OneVector),
            ("CostFunctionJ", Persistence.OneScalar),
            ("CostFunctionJb", Persistence.OneScalar),
            ("CostFunctionJo", Persistence.OneScalar),
            ("CurrentIterationNumber", Persistence.OneIndex),
            ("CurrentStepNumber", Persistence.OneIndex),
        ):
            self.StoredVariables[__v] = __c(name = __v)
        #
        # Case-insensitive lookup table for variable names
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        requested = self._parameters["StoreSupplementaryCalculations"]
        return key in requested

    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all the available variables when no key is given.
        The variables are returned directly as objects, so the individual
        access methods are the ones of the persistence classes.
        """
        if key is None:
            return self.StoredVariables
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1200 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface/action class for the algorithm and its parameters.
    """
    # NOTE(review): part of the __init__ signature (asAlgorithm, asDict,
    # asScript keywords) and a few attribute initialisations are not
    # visible in this excerpt
        name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        if asAlgorithm is None and asScript is not None:
            # The algorithm name may be read from a user script
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    def updateParameters(self,
        # NOTE(review): the remaining signature arguments (asDict, asScript)
        # and an "else:" branch are not visible in this excerpt
        "Update of the parameters"
        if asDict is None and asScript is not None:
            # Parameters may be read from a user script
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        """Launch the assimilation calculation from a dictionary of AO objects.

        NOTE(review): most keyword arguments of the final run() call are not
        visible in this excerpt.
        """
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Each input is unwrapped through getO() when it is a wrapper object
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        """Launch the assimilation calculation through a YACS XML scheme.

        Requires a SALOME/YACS/ADAO environment; the scheme file is loaded,
        parsed, checked for consistency, then executed.

        NOTE(review): several guard lines ("else:", "try:", import and
        execution statements) are not visible in this excerpt.
        """
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        #
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
        #
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        #
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        #
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        #
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            # Without a key: all public variables (non-public names removed)
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
        # NOTE(review): the "else:" line and the "return allvariables"
        # statement are not visible in this excerpt
1354 def pop(self, k, d):
1355 "Necessaire pour le pickling"
1356 return self.__algorithm.pop(k, d)
1358 def getAlgorithmRequiredParameters(self, noDetails=True):
1359 "Renvoie la liste des paramètres requis selon l'algorithme"
1360 return self.__algorithm.getRequiredParameters(noDetails)
1362 def getAlgorithmInputArguments(self):
1363 "Renvoie la liste des entrées requises selon l'algorithme"
1364 return self.__algorithm.getInputArguments()
1366 def getAlgorithmAttributes(self):
1367 "Renvoie la liste des attributs selon l'algorithme"
1368 return self.__algorithm.setAttributes()
    def setObserver(self, __V, __O, __I, __S):
        """Attach the observer function __O (with info __I, scheduler __S) to variable __V.

        NOTE(review): the "else:" line and some keyword arguments of the
        setDataObserver call are not visible in this excerpt.
        """
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        """Remove the observer function __O from variable __V.

        NOTE(review): the "else:" line and the remaining arguments of the
        removeDataObserver call are not visible in this excerpt.
        """
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    def hasObserver(self, __V):
        """Tell whether the stored variable __V has an attached data observer.

        NOTE(review): the early "return False" lines of the two guards are
        not visible in this excerpt.
        """
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): fragment of AlgorithmAndParameters.keys() — the "def"
        # and docstring lines are not visible in this excerpt; the body
        # returns all public variable and parameter names
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1412 def __contains__(self, key=None):
1413 "D.__contains__(k) -> True if D has a key k, else False"
1414 return key in self.__algorithm or key in self.__P
        # NOTE(review): fragments of __repr__ and __str__ — their "def" lines
        # are not visible in this excerpt
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to use to carry out the assimilation study. The
        argument is a character string referring to the name of an algorithm
        performing the operation on the fixed arguments.

        NOTE(review): a few guard lines ("if choice is None:", "try:",
        "raise ImportError(" openings, the module_path initialisation) are
        not visible in this excerpt.
        """
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly look for the complete file
        # -------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
                "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
                "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type of the file
        # --------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validate the correct correspondence of the sizes of the variables
        and of the matrices, if there are any.

        NOTE(review): several "raise ValueError(" opening lines and "else:"
        lines of this method are not visible in this excerpt.
        """
        # Shape extraction: each input may expose "size", a tuple "shape",
        # or a callable "shape()"
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check of the conditions
        # -----------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
                "Shape characteristic of observation operator (H)"+\
                " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
                "Shape characteristic of observation operator (H)"+\
                " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
                "Shape characteristic of observation operator (H)"+\
                " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
                "Shape characteristic of observation operator (H)"+\
                " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as an
            # ensemble, reshaped into a persistent vector
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background")
                for member in asPersistentVector:
                    self.__Xb.store( numpy.asarray(member, dtype=float) )
                __Xb_shape = min(__B_shape)
                "Shape characteristic of a priori errors covariance matrix (B)"+\
                " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
                "Shape characteristic of observation errors covariance matrix (R)"+\
                " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
                "Shape characteristic of evolution model (EM)"+\
                " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
                "Shape characteristic of control model (CM)"+\
                " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        #
        if ("StateBoundsForQuantiles" in self.__P) \
            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1603 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# RegulationAndParameters: generic interface object holding a regulation
# algorithm name and its parameters dictionary (self.__P).
1604 class RegulationAndParameters(object):
1606 Classe générale d'interface d'action pour la régulation et ses paramètres
1609 name = "GenericRegulation",
# Constructor body (header elided): store the name, then resolve the algorithm
# and the parameter dict from explicit arguments or from an imported script.
1616 self.__name = str(name)
# If no algorithm is given but a script is, load the "Algorithm" value from it.
1619 if asAlgorithm is None and asScript is not None:
1620 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
# (elided else branch) otherwise take the explicitly supplied algorithm.
1622 __Algo = asAlgorithm
# Same script fallback for the parameter dictionary ("Parameters" entry).
1624 if asDict is None and asScript is not None:
1625 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge whatever was resolved into the internal parameter dict self.__P
# (its initialization line is elided from this view).
1629 if __Dict is not None:
1630 self.__P.update( dict(__Dict) )
1632 if __Algo is not None:
1633 self.__P.update( {"Algorithm":str(__Algo)} )
# Accessor for a stored parameter; raises KeyError for an unknown key.
# NOTE(review): a no-key branch (returning the whole dict when key is None)
# is presumably elided between 1636 and 1638 — confirm against upstream.
1635 def get(self, key = None):
1636 "Vérifie l'existence d'une clé de variable ou de paramètres"
1638 return self.__P[key]
1642 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# DataObserver: generic observer-style interface; binds an observer callable
# (self.__O) to one or several named variables (self.__V) with per-variable
# info strings (self.__I), then registers it on an algorithm object.
1643 class DataObserver(object):
1645 Classe générale d'interface de type observer
1648 name = "GenericObserver",
# Constructor body (header elided): validate and normalize the variable
# name(s) the observer is attached to.
1660 self.__name = str(name)
1665 if onVariable is None:
1666 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
# A tuple/list of names: coerce every entry to str.
1667 elif type(onVariable) in (tuple, list):
1668 self.__V = tuple(map( str, onVariable ))
1669 if withInfo is None:
# (elided branch) default info presumably mirrors the variable names — confirm.
1672 self.__I = (str(withInfo),)*len(self.__V)
# A single name given as a string: wrap in a one-element tuple.
1673 elif isinstance(onVariable, str):
1674 self.__V = (onVariable,)
1675 if withInfo is None:
1676 self.__I = (onVariable,)
1678 self.__I = (str(withInfo),)
# (elided else) any other type is rejected.
1680 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer itself: either a ready-made callable object, or a function
# compiled from user text via UserScript + Observer2Func.
1682 if asObsObject is not None:
1683 self.__O = asObsObject
1685 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1686 __Function = Observer2Func(__FunctionText)
1687 self.__O = __Function.getfunc()
# Register the observer on each target variable of the algorithm object
# (ename/einfo extraction lines are elided from this view).
1689 for k in range(len(self.__V)):
1692 if ename not in withAlgo:
1693 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1695 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# repr/str: show both the observed variables and the observer object.
1698 "x.__repr__() <==> repr(x)"
1699 return repr(self.__V)+"\n"+repr(self.__O)
1702 "x.__str__() <==> str(x)"
1703 return str(self.__V)+"\n"+str(self.__O)
1705 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# UserScript: generic wrapper around a user-provided script text (self.__F),
# sourced from a raw string, a named template, or an external script file.
1706 class UserScript(object):
1708 Classe générale d'interface de type texte de script utilisateur
1711 name = "GenericUserScript",
# Constructor body (header elided): resolve the script text by priority:
# explicit string > matching named template > imported script file.
1718 self.__name = str(name)
1720 if asString is not None:
# UserPostAnalysis templates are looked up in Templates.UserPostAnalysisTemplates.
1722 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1723 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
# Observer templates are looked up in Templates.ObserverTemplates.
1724 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1725 self.__F = Templates.ObserverTemplates[asTemplate]
1726 elif asScript is not None:
1727 self.__F = Interfaces.ImportFromScript(asScript).getstring()
# repr/str: delegate to the stored script text.
1732 "x.__repr__() <==> repr(x)"
1733 return repr(self.__F)
1736 "x.__str__() <==> str(x)"
1737 return str(self.__F)
1739 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# ExternalParameters: dict-like container (self.__P) of externally supplied
# parameters, fillable from an explicit dict or from a script variable named
# "ExternalParameters".
# NOTE(review): the class docstring below appears copy-pasted from UserScript
# ("texte de script utilisateur") — should describe external parameters.
1740 class ExternalParameters(object):
1742 Classe générale d'interface de type texte de script utilisateur
1745 name = "GenericExternalParameters",
# Constructor body (header elided): store the name then delegate to
# updateParameters for the initial fill.
1751 self.__name = str(name)
1754 self.updateParameters( asDict, asScript )
# Merge new parameters from a dict, or from a script when no dict is given.
1756 def updateParameters(self,
1760 "Mise a jour des parametres"
1761 if asDict is None and asScript is not None:
1762 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1766 if __Dict is not None:
1767 self.__P.update( dict(__Dict) )
# Dict-like read accessors; get raises KeyError for an unknown key.
# NOTE(review): a no-key branch of get (key is None) is presumably elided.
1769 def get(self, key = None):
1771 return self.__P[key]
# Two key-listing accessors (their def lines are elided) both return the keys.
1773 return list(self.__P.keys())
1776 return list(self.__P.keys())
# pop with a required default, mirroring dict.pop(k, d).
1778 def pop(self, k, d):
1779 return self.__P.pop(k, d)
# items() passthrough (def line elided).
1782 return self.__P.items()
1784 def __contains__(self, key=None):
1785 "D.__contains__(k) -> True if D has a key k, else False"
1786 return key in self.__P
1788 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# State: generic state-vector interface. Wraps either a single column vector
# (self.__is_vector, stored as an (n,1) ndarray) or a series of vectors
# (self.__is_series, stored as a Persistence.OneVector), sourced from direct
# values, a script variable, or data file columns.
1789 class State(object):
1791 Classe générale d'interface de type état
1794 name = "GenericVector",
1796 asPersistentVector = None,
1802 toBeChecked = False,
# Original (French) constructor docstring fragment follows; summary: define a
# vector from asVector / asPersistentVector / asScript / asDataFile, with
# colNames selecting columns and colMajor controlling row/column ordering.
1805 Permet de définir un vecteur :
1806 - asVector : entrée des données, comme un vecteur compatible avec le
1807 constructeur de numpy.matrix, ou "True" si entrée par script.
1808 - asPersistentVector : entrée des données, comme une série de vecteurs
1809 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1810 type Persistence, ou "True" si entrée par script.
1811 - asScript : si un script valide est donné contenant une variable
1812 nommée "name", la variable est de type "asVector" (par défaut) ou
1813 "asPersistentVector" selon que l'une de ces variables est placée à
1815 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1816 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1817 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1818 nommée "name"), on récupère les colonnes et on les range ligne après
1819 ligne (colMajor=False, par défaut) ou colonne après colonne
1820 (colMajor=True). La variable résultante est de type "asVector" (par
1821 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1824 self.__name = str(name)
1825 self.__check = bool(toBeChecked)
1829 self.__is_vector = False
1830 self.__is_series = False
# Source selection: script variable takes precedence, then data file, then
# the directly supplied values (elided else branch at 1859).
1832 if asScript is not None:
1833 __Vector, __Series = None, None
1834 if asPersistentVector:
1835 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1837 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1838 elif asDataFile is not None:
1839 __Vector, __Series = None, None
1840 if asPersistentVector:
# Column selection: named columns if given, else the column named self.__name.
1841 if colNames is not None:
1842 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1844 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already column-major, so the transpose condition is inverted
# for the "application/numpy.npz" format.
1845 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1846 __Series = numpy.transpose(__Series)
1847 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1848 __Series = numpy.transpose(__Series)
1850 if colNames is not None:
1851 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1853 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten order depends on colMajor: Fortran order (column-major) vs C order.
1855 __Vector = numpy.ravel(__Vector, order = "F")
1857 __Vector = numpy.ravel(__Vector, order = "C")
1859 __Vector, __Series = asVector, asPersistentVector
# Single-vector case: normalize to a float (n,1) column array.
1861 if __Vector is not None:
1862 self.__is_vector = True
1863 if isinstance(__Vector, str):
1864 __Vector = PlatformInfo.strvect2liststr( __Vector )
1865 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1866 self.shape = self.__V.shape
1867 self.size = self.__V.size
# Series case: store every member as a float array in a Persistence.OneVector.
1868 elif __Series is not None:
1869 self.__is_series = True
1870 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1871 self.__V = Persistence.OneVector(self.__name)
1872 if isinstance(__Series, str):
1873 __Series = PlatformInfo.strmatrix2liststr(__Series)
1874 for member in __Series:
1875 if isinstance(member, str):
1876 member = PlatformInfo.strvect2liststr( member )
1877 self.__V.store(numpy.asarray( member, dtype=float ))
# shape may be an attribute or a callable on the Persistence object.
1880 if isinstance(self.__V.shape, (tuple, list)):
1881 self.shape = self.__V.shape
1883 self.shape = self.__V.shape()
1884 if len(self.shape) == 1:
1885 self.shape = (self.shape[0],1)
1886 self.size = self.shape[0] * self.shape[1]
# (elided raise) neither vector nor series was resolvable: error message body.
1889 "The %s object is improperly defined or undefined,"%self.__name+\
1890 " it requires at minima either a vector, a list/tuple of"+\
1891 " vectors or a persistent object. Please check your vector input.")
# Optional scheduler attached to the state.
1893 if scheduledBy is not None:
1894 self.__T = scheduledBy
# getO: return (value, scheduler) when asked, else the value alone
# (the remaining branches are elided from this view).
1896 def getO(self, withScheduler=False):
1898 return self.__V, self.__T
1899 elif self.__T is None:
# Internal-type predicates (def lines elided): isvector / isseries flags.
1905 "Vérification du type interne"
1906 return self.__is_vector
1909 "Vérification du type interne"
1910 return self.__is_series
# repr/str delegate to the stored value.
1913 "x.__repr__() <==> repr(x)"
1914 return repr(self.__V)
1917 "x.__str__() <==> str(x)"
1918 return str(self.__V)
1920 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# Covariance: generic covariance-matrix interface with four storage modes,
# each flagged by one boolean:
#   scalar (sigma^2 * I), vector (diagonal), full matrix, or opaque object
#   exposing a matrix-like protocol.
# All linear-algebra accessors (getI, getT, cholesky, sqrtm, diag, trace, …)
# dispatch on these flags so sparse representations stay sparse.
1921 class Covariance(object):
1923 Classe générale d'interface de type covariance
1926 name = "GenericCovariance",
1927 asCovariance = None,
1928 asEyeByScalar = None,
1929 asEyeByVector = None,
1932 toBeChecked = False,
# Original (French) constructor docstring fragment follows; summary:
# asCovariance = full matrix, asEyeByScalar = variance scalar on identity,
# asEyeByVector = variance vector on the diagonal, asCovObject = object with
# the mandatory matrix protocol, toBeChecked = verify SPD of full matrices.
1935 Permet de définir une covariance :
1936 - asCovariance : entrée des données, comme une matrice compatible avec
1937 le constructeur de numpy.matrix
1938 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1939 multiplicatif d'une matrice de corrélation identité, aucune matrice
1940 n'étant donc explicitement à donner
1941 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1942 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1943 n'étant donc explicitement à donner
1944 - asCovObject : entrée des données comme un objet python, qui a les
1945 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1946 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1947 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1948 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1949 pleine doit être vérifié
1951 self.__name = str(name)
1952 self.__check = bool(toBeChecked)
# Exactly one of these flags is set True below, per the resolved input kind.
1955 self.__is_scalar = False
1956 self.__is_vector = False
1957 self.__is_matrix = False
1958 self.__is_object = False
# Script input: the elided branches pick which kind to load by the as* flags.
1960 if asScript is not None:
1961 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1963 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1965 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1967 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1969 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
# (elided else) direct arguments when no script is given.
1971 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar mode: sigma^2 multiplying an implicit identity; must be one value.
1973 if __Scalar is not None:
1974 if isinstance(__Scalar, str):
1975 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1976 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1977 if numpy.array(__Scalar).size != 1:
1979 " The diagonal multiplier given to define a sparse matrix is"+\
1980 " not a unique scalar value.\n Its actual measured size is"+\
1981 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
1982 self.__is_scalar = True
# abs() forces a non-negative variance; __validate rejects non-positive ones.
1983 self.__C = numpy.abs( float(__Scalar) )
# Vector mode: variances on the diagonal; logical shape is square.
1986 elif __Vector is not None:
1987 if isinstance(__Vector, str):
1988 __Vector = PlatformInfo.strvect2liststr( __Vector )
1989 self.__is_vector = True
1990 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1991 self.shape = (self.__C.size,self.__C.size)
1992 self.size = self.__C.size**2
# Full-matrix mode: stored as numpy.matrix (legacy API).
1993 elif __Matrix is not None:
1994 self.__is_matrix = True
1995 self.__C = numpy.matrix( __Matrix, float )
1996 self.shape = self.__C.shape
1997 self.size = self.__C.size
# Object mode: verify the mandatory matrix-protocol attributes are present.
1998 elif __Object is not None:
1999 self.__is_object = True
2001 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2002 if not hasattr(self.__C,at):
2003 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2004 if hasattr(self.__C,"shape"):
2005 self.shape = self.__C.shape
2008 if hasattr(self.__C,"size"):
2009 self.size = self.__C.size
# Internal consistency check, called after construction (call site elided):
# squareness, positivity, and (optionally) SPD via a Cholesky attempt.
2017 def __validate(self):
2019 if self.__C is None:
2020 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2021 if self.ismatrix() and min(self.shape) != max(self.shape):
2022 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2023 if self.isobject() and min(self.shape) != max(self.shape):
2024 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2025 if self.isscalar() and self.__C <= 0:
2026 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2027 if self.isvector() and (self.__C <= 0).any():
2028 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check runs when explicitly requested or when logging is verbose
# (level below WARNING); failure of Cholesky means not SPD.
2029 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2031 numpy.linalg.cholesky( self.__C )
2033 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2034 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2038 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Four internal-type predicates (def lines elided): isscalar / isvector /
# ismatrix / isobject.
2041 "Vérification du type interne"
2042 return self.__is_scalar
2045 "Vérification du type interne"
2046 return self.__is_vector
2049 "Vérification du type interne"
2050 return self.__is_matrix
2053 "Vérification du type interne"
2054 return self.__is_object
# getI (def line elided): inverse, staying in the same sparse representation;
# diagonal/scalar inverses are elementwise reciprocals.
2059 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2060 elif self.isvector():
2061 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2062 elif self.isscalar():
2063 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2064 elif self.isobject() and hasattr(self.__C,"getI"):
2065 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2067 return None # Indispensable
# getT (def line elided): transpose; diagonal and scalar forms are symmetric
# so they are returned unchanged.
2072 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2073 elif self.isvector():
2074 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2075 elif self.isscalar():
2076 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2077 elif self.isobject() and hasattr(self.__C,"getT"):
2078 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2080 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# cholesky (def line elided): lower-triangular factor; sqrt for diagonals.
2083 "Décomposition de Cholesky"
2085 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2086 elif self.isvector():
2087 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2088 elif self.isscalar():
2089 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2090 elif self.isobject() and hasattr(self.__C,"cholesky"):
2091 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2093 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# choleskyI: inverse of the Cholesky factor.
2095 def choleskyI(self):
2096 "Inversion de la décomposition de Cholesky"
2098 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2099 elif self.isvector():
2100 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2101 elif self.isscalar():
2102 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2103 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2104 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2106 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# sqrtm (def line elided): symmetric matrix square root (real part of
# scipy.linalg.sqrtm for the full-matrix case).
2109 "Racine carrée matricielle"
2112 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2113 elif self.isvector():
2114 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2115 elif self.isscalar():
2116 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2117 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2118 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2120 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# sqrtmI (def line elided): inverse of the matrix square root.
2123 "Inversion de la racine carrée matricielle"
2126 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2127 elif self.isvector():
2128 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2129 elif self.isscalar():
2130 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2131 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2132 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2134 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# diag: the matrix diagonal as a plain array; scalar mode needs msize to know
# how long the diagonal is.
2136 def diag(self, msize=None):
2137 "Diagonale de la matrice"
2139 return numpy.diag(self.__C)
2140 elif self.isvector():
2142 elif self.isscalar():
2144 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2146 return self.__C * numpy.ones(int(msize))
2147 elif self.isobject() and hasattr(self.__C,"diag"):
2148 return self.__C.diag()
2150 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# trace: sum of the diagonal; scalar mode is sigma^2 * msize.
2152 def trace(self, msize=None):
2153 "Trace de la matrice"
2155 return numpy.trace(self.__C)
2156 elif self.isvector():
2157 return float(numpy.sum(self.__C))
2158 elif self.isscalar():
2160 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2162 return self.__C * int(msize)
2163 elif self.isobject():
2164 return self.__C.trace()
2166 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# asfullmatrix: densify to a float ndarray whatever the storage mode.
2168 def asfullmatrix(self, msize=None):
2171 return numpy.asarray(self.__C, dtype=float)
2172 elif self.isvector():
2173 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2174 elif self.isscalar():
2176 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2178 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2179 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2180 return self.__C.asfullmatrix()
2182 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
# assparsematrix: body elided from this view.
2184 def assparsematrix(self):
# repr/str delegate to the stored representation self.__C.
2192 "x.__repr__() <==> repr(x)"
2193 return repr(self.__C)
2196 "x.__str__() <==> str(x)"
2197 return str(self.__C)
# Arithmetic protocol. Sparse modes add/subtract themselves along the
# diagonal of the dense operand in place on a copy-converted array.
2199 def __add__(self, other):
2200 "x.__add__(y) <==> x+y"
2201 if self.ismatrix() or self.isobject():
2202 return self.__C + numpy.asmatrix(other)
2203 elif self.isvector() or self.isscalar():
2204 _A = numpy.asarray(other)
2205 if len(_A.shape) == 1:
# NOTE(review): the [::2] stride for the 1-D case looks suspicious — the
# diagonal of a flattened square uses stride shape[1]+1 as below; confirm
# against upstream before relying on the 1-D branch.
2206 _A.reshape((-1,1))[::2] += self.__C
# 2-D case: stride shape[1]+1 over the flattened array walks the diagonal.
2208 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2209 return numpy.asmatrix(_A)
2211 def __radd__(self, other):
2212 "x.__radd__(y) <==> y+x"
2213 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2215 def __sub__(self, other):
2216 "x.__sub__(y) <==> x-y"
2217 if self.ismatrix() or self.isobject():
2218 return self.__C - numpy.asmatrix(other)
2219 elif self.isvector() or self.isscalar():
2220 _A = numpy.asarray(other)
2221 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2222 return numpy.asmatrix(_A)
2224 def __rsub__(self, other):
2225 "x.__rsub__(y) <==> y-x"
2226 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
# __neg__: body elided from this view.
2229 "x.__neg__() <==> -x"
# __matmul__: matrix-multiply dispatch returning plain ndarrays; vector
# operands (size == shape[1]) and matrix operands handled separately.
2232 def __matmul__(self, other):
2233 "x.__mul__(y) <==> x@y"
2234 if self.ismatrix() and isinstance(other, (int, float)):
2235 return numpy.asarray(self.__C) * other
2236 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2237 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2238 return numpy.ravel(self.__C @ numpy.ravel(other))
2239 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2240 return numpy.asarray(self.__C) @ numpy.asarray(other)
2242 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
# Diagonal mode multiplies elementwise (column-broadcast for matrices).
2243 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2244 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2245 return numpy.ravel(self.__C) * numpy.ravel(other)
2246 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2247 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2249 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2250 elif self.isscalar() and isinstance(other,numpy.matrix):
2251 return numpy.asarray(self.__C * other)
2252 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2253 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2254 return self.__C * numpy.ravel(other)
2256 return self.__C * numpy.asarray(other)
2257 elif self.isobject():
2258 return self.__C.__matmul__(other)
2260 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# __mul__: legacy numpy.matrix-returning counterpart of __matmul__.
2262 def __mul__(self, other):
2263 "x.__mul__(y) <==> x*y"
2264 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2265 return self.__C * other
2266 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2267 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2268 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2269 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2270 return self.__C * numpy.asmatrix(other)
2273 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2274 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2275 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2276 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2277 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2278 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2281 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2282 elif self.isscalar() and isinstance(other,numpy.matrix):
2283 return self.__C * other
2284 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2285 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2286 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2288 return self.__C * numpy.asmatrix(other)
2289 elif self.isobject():
2290 return self.__C.__mul__(other)
2292 raise NotImplementedError(
2293 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# __rmatmul__: right-hand matmul (y @ x); mirrors __matmul__ with the
# operand on the left.
2295 def __rmatmul__(self, other):
2296 "x.__rmul__(y) <==> y@x"
2297 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2298 return other * self.__C
2299 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2300 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2301 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2302 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2303 return numpy.asmatrix(other) * self.__C
2306 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2307 elif self.isvector() and isinstance(other,numpy.matrix):
2308 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2309 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2310 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2311 return numpy.asmatrix(numpy.array(other) * self.__C)
2314 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2315 elif self.isscalar() and isinstance(other,numpy.matrix):
2316 return other * self.__C
2317 elif self.isobject():
2318 return self.__C.__rmatmul__(other)
2320 raise NotImplementedError(
2321 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# __rmul__: right-hand legacy multiply (y * x); additionally accepts a bare
# float on the left in scalar mode.
2323 def __rmul__(self, other):
2324 "x.__rmul__(y) <==> y*x"
2325 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2326 return other * self.__C
2327 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2328 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2329 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2330 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2331 return numpy.asmatrix(other) * self.__C
2334 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2335 elif self.isvector() and isinstance(other,numpy.matrix):
2336 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2337 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2338 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2339 return numpy.asmatrix(numpy.array(other) * self.__C)
2342 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2343 elif self.isscalar() and isinstance(other,numpy.matrix):
2344 return other * self.__C
2345 elif self.isscalar() and isinstance(other,float):
2346 return other * self.__C
2347 elif self.isobject():
2348 return self.__C.__rmul__(other)
2350 raise NotImplementedError(
2351 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# __len__ (def line elided): number of rows.
2354 "x.__len__() <==> len(x)"
2355 return self.shape[0]
2357 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# Observer2Func: builds an observer callable from its source text (self.__corps).
# The elided func body presumably exec's the stored text with var/info in
# scope — confirm against upstream.
2358 class Observer2Func(object):
2360 Création d'une fonction d'observateur a partir de son texte
2362 def __init__(self, corps=""):
2363 self.__corps = corps
# The observer entry point, called with a variable and its info string.
2364 def func(self,var,info):
2365 "Fonction d'observation"
# getfunc (def line elided): returns the function pointer held by the object.
2368 "Restitution du pointeur de fonction dans l'objet"
2371 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines; code left byte-identical, only review comments added.
# CaseLogger: records the sequence of commands that created a case
# (self.__logSerie) and replays it through pluggable viewers (dump) and
# loaders (load), keyed by format name.
2372 class CaseLogger(object):
2374 Conservation des commandes de création d'un cas
2376 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2377 self.__name = str(__name)
2378 self.__objname = str(__objname)
# Serialized log of (command, keys, locals, pre, switchoff) tuples.
2379 self.__logSerie = []
# When True, register() ignores incoming commands.
2380 self.__switchoff = False
# Built-in viewers (dict literal opening line elided): format -> viewer class.
2382 "TUI" :Interfaces._TUIViewer,
2383 "SCD" :Interfaces._SCDViewer,
2384 "YACS":Interfaces._YACSViewer,
2385 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2386 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2387 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in loaders (dict literal opening line elided): format -> loader class.
2390 "TUI" :Interfaces._TUIViewer,
2391 "COM" :Interfaces._COMViewer,
# Caller-supplied extensions are merged over the defaults.
2393 if __addViewers is not None:
2394 self.__viewers.update(dict(__addViewers))
2395 if __addLoaders is not None:
2396 self.__loaders.update(dict(__addLoaders))
# Record one command unless logging is currently switched off.
2398 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2399 "Enregistrement d'une commande individuelle"
2400 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
# "self" is never a useful logged key.
2401 if "self" in __keys: __keys.remove("self")
2402 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
# (elided conditions) toggle the switchoff latch per the __switchoff flag.
2404 self.__switchoff = True
2406 self.__switchoff = False
# Render the recorded commands with the viewer registered for __format.
2408 def dump(self, __filename=None, __format="TUI", __upa=""):
2409 "Restitution normalisée des commandes"
2410 if __format in self.__viewers:
2411 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2413 raise ValueError("Dumping as \"%s\" is not available"%__format)
2414 return __formater.dump(__filename, __upa)
# Load commands with the loader registered for __format.
2416 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2417 "Chargement normalisé des commandes"
2418 if __format in self.__loaders:
2419 __formater = self.__loaders[__format]()
2421 raise ValueError("Loading as \"%s\" is not available"%__format)
2422 return __formater.load(__filename, __content, __object)
2424 # ==============================================================================
# NOTE(review): damaged extraction — embedded original line numbers and elided
# lines (including this function's own def header); code left byte-identical,
# only review comments added.
# MultiFonction-style evaluator: applies _sFunction to every element of an
# ordered input series (__xserie), either through a multiprocessing.Pool or
# sequentially, and returns the corresponding list of results.
2427 _extraArguments = None,
2428 _sFunction = lambda x: x,
# Original (French) docstring fragment: for an ordered list of input vectors,
# return the corresponding list of function values.
2433 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2434 correspondante de valeurs de la fonction en argument
2436 # Vérifications et définitions initiales
2437 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2438 if not PlatformInfo.isIterable( __xserie ):
# NOTE(review): "unkown" typo in this runtime error message (and the one at
# 2476) — fix in a dedicated change, not in a doc-only pass.
2439 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or missing values fall back to an elided default.
2441 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2444 __mpWorkers = int(_mpWorkers)
# Parallel path (enabling condition elided): map the function over the jobs
# with a process pool; _jobs construction lines are elided from this view.
2446 import multiprocessing
2457 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2458 with multiprocessing.Pool(__mpWorkers) as pool:
2459 __multiHX = pool.map( _sFunction, _jobs )
2462 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential path: dispatch on the kind of extra arguments (none / positional
# sequence / keyword dict).
2464 # logging.debug("MULTF Internal monoprocessing calculation begin")
2466 if _extraArguments is None:
2467 for __xvalue in __xserie:
2468 __multiHX.append( _sFunction( __xvalue ) )
2469 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2470 for __xvalue in __xserie:
2471 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2472 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2473 for __xvalue in __xserie:
2474 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2476 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2477 # logging.debug("MULTF Internal monoprocessing calculation end")
2479 # logging.debug("MULTF Internal multifonction calculations end")
2483 # ==============================================================================
# Script entry point: when the module is executed directly, print a
# self-diagnostic banner (no other side effects visible in this chunk).
2484 if __name__ == "__main__":
2485 print('\n AUTODIAGNOSTIC\n')