1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
Defines the elementary general-purpose tools.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
class CacheManager(object):
    """
    General class managing a cache of computations, used to avoid
    re-evaluating an operator at a point that has already been seen.
    """
    # NOTE(review): this excerpt is truncated — the "__init__" def line, the
    # initialisation of the internal entry list (__listOPCV) and of the seen
    # operator names (__seenNames), several loop-body statements and the
    # "clearCache"/"disable"/"enable" def lines are missing; indentation
    # below is reconstructed and approximate.
        toleranceInRedundancy = 1.e-18,
        lengthOfRedundancy = -1,
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP = float(toleranceInRedundancy)  # relative tolerance used to declare two points redundant
        self.__lengthOR = int(lengthOfRedundancy)      # cache length; negative means "auto-size at first store"
        self.__initlnOR = self.__lengthOR              # remembered initial length (restored on re-enable)

    def wasCalculatedIn(self, xValue, oName="" ):
        "Checks the existence of a cached computation matching the given value"
        # Scan the cached entries backwards (most recent first), bounded by
        # the allowed cache length __lengthOR.
        # NOTE(review): the bodies of the first guards (which skip
        # non-matching entries) are not visible in this excerpt.
        for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
            if not hasattr(xValue, 'size'):
            elif (str(oName) != self.__listOPCV[i][3]):
            elif (xValue.size != self.__listOPCV[i][0].size):
            elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
            elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
                # Cache hit: the point is within tolerance of a stored point,
                # recover the previously stored operator value.
                __HxV = self.__listOPCV[i][1]

    def storeValueInX(self, xValue, HxValue, oName="" ):
        "Stores, for an operator o, a computation Hx corresponding to the value x"
        if self.__lengthOR < 0:
            # First store ever: size the cache from the state dimension
            self.__lengthOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR = self.__lengthOR
            self.__seenNames.append(str(oName))
        if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
            self.__lengthOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
            self.__initlnOR += self.__lengthOR
            self.__seenNames.append(str(oName))
        while len(self.__listOPCV) > self.__lengthOR:
            # Evict the oldest entries beyond the allowed cache length
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)), # 0 Previous point
            copy.copy(HxValue),             # 1 Previous value
            numpy.linalg.norm(xValue),      # 2 Norm
            str(oName),                     # 3 Operator name

    # NOTE(review): truncated — the def lines of the cache reset/disable
    # methods are not visible; only fragments of their bodies remain below.
        self.__initlnOR = self.__lengthOR
        self.__enabled = False

        self.__lengthOR = self.__initlnOR
        self.__enabled = True
114 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator type
    """
    # NOTE(review): this excerpt is truncated — the class-level call counters
    # (NbCallsAsMatrix, NbCallsAsMethod, NbCallsOfCached), the shared cache
    # manager "CM", the "__init__" def line and several else-branches are
    # missing; indentation below is reconstructed and approximate.
        name = "GenericOperator",
        avoidingRedundancy = True,
        reducingMemoryUse = False,
        inputAsMultiFunction = False,
        enableMultiProcess = False,
        extraArguments = None,
        """
        An object of this type is built by providing, through one of the two
        keywords, either a Python function or multi-function, or a matrix.
        Arguments:
        - name : operator name
        - fromMethod : argument of Python function type
        - fromMatrix : argument suited to the numpy.array/matrix constructor
        - avoidingRedundancy : boolean avoiding (or not) redundant computations
        - reducingMemoryUse : boolean forcing (or not) computations that use
          less memory
        - inputAsMultiFunction : boolean indicating a function explicitly
          defined (or not) as a multi-function
        - extraArguments : extra arguments passed to the base function and
          its derivatives (tuple or dictionary)
        """
        self.__name = str(name)
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__reduceM = bool( reducingMemoryUse )
        self.__avoidRC = bool( avoidingRedundancy )
        self.__inputAsMF = bool( inputAsMultiFunction )
        self.__mpEnabled = bool( enableMultiProcess )
        self.__extraArgs = extraArguments
        if fromMethod is not None and self.__inputAsMF:
            # The user function is already expressed as a multi-function
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Type = "Method"
        elif fromMethod is not None and not self.__inputAsMF:
            # Wrap the user mono-function into a multi-function
            self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
            self.__Type = "Method"
        elif fromMatrix is not None:
            # Linear operator given as a matrix (possibly in string form)
            if isinstance(fromMatrix, str):
                fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
            self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
            self.__Type = "Matrix"

    def disableAvoidingRedundancy(self):
        # Deactivate the shared computation cache
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        # NOTE(review): truncated — the original conditionally enables the
        # cache; only the "disable" fallback branch is visible here.
        Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of
        arguments xValue. This method simply applies the operator, each
        argument being expected a priori to be of the proper type.
        Arguments as a series:
        - xValue : argument suited to applying the operator
        - HValue : precomputed value of the operator at this point
        - argsAsSerie : indicates whether the arguments are mono- or multi-valued
        """
        # NOTE(review): truncated — the normalisation of xValue/HValue into
        # the _xValue/_HValue series, the result accumulators (_HxValue,
        # _xserie) and several else/continue branches are not visible here.
        if HValue is not None:
        PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
        if _HValue is not None:
            # Precomputed values supplied: store them (and feed the cache)
            assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
            for i in range(len(_HValue)):
                _HxValue.append( _HValue[i] )
                Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
            for i, xv in enumerate(_xValue):
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
                __alreadyCalculated = False
                if __alreadyCalculated:
                    # Cache hit: count it and reuse the stored value
                    self.__addOneCacheCall()
                if self.__Matrix is not None:
                    # Linear case: direct matrix-vector product
                    self.__addOneMatrixCall()
                    _hv = self.__Matrix @ numpy.ravel(xv)
                    self.__addOneMethodCall()
                _HxValue.append( _hv )
        if len(_xserie)>0 and self.__Matrix is None:
            # Evaluate all remaining points in one multi-function call
            if self.__extraArgs is None:
                _hserie = self.__Method( _xserie ) # MF computation
                _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
            if not hasattr(_hserie, "pop"):
                "The user input multi-function doesn't seem to return a"+\
                " result sequence, behaving like a mono-function. It has"+\
        Operator.CM.storeValueInX(_xv,_hv,self.__name)
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to (xValue, uValue)
        pairs. This method simply applies the operator, its argument being
        expected a priori to be of the proper type. If uValue is None, the
        operator is assumed to apply to xValue only.
        Arguments:
        - paires : the paired arguments are:
            - xValue : argument X suited to applying the operator
            - uValue : argument U suited to applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _xuValue = paires
        else:           _xuValue = (paires,)
        PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
        # NOTE(review): truncated — the accumulator initialisations and the
        # else-branches of this method are not visible here.
        if self.__Matrix is not None:
            # Linear case: apply the stored matrix to each X (U is ignored)
            for paire in _xuValue:
                _xValue, _uValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Functional case: build the argument list, then one MF call
            for paire in _xuValue:
                _xValue, _uValue = paire
                if _uValue is not None:
                    _xuArgs.append( paire )
                    _xuArgs.append( _xValue )
            self.__addOneMethodCall( len(_xuArgs) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _xuArgs ) # MF computation
                _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
        """
        Returns the result of applying the operator to a series of
        arguments xValue, knowing that the operator is valid at xNominal.
        This method simply applies the operator, its argument being expected
        a priori to be of the proper type. If the operator is linear (it is
        a matrix), then it is valid at any nominal point and xNominal can be
        anything. There is a single pair by default, and argsAsSerie
        indicates that the argument is multi-pair.
        Arguments:
        - paires : the paired arguments are:
            - xNominal : series of arguments giving the point at which the
              operator is built, to be applied afterwards
            - xValue : series of arguments suited to applying the operator
        - argsAsSerie : indicates whether the argument is mono- or multi-valued
        """
        if argsAsSerie: _nxValue = paires
        else:           _nxValue = (paires,)
        PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
        # NOTE(review): truncated — the accumulator initialisation and the
        # else-branch introducing the functional case are not visible here.
        if self.__Matrix is not None:
            # Linear case: the nominal point is irrelevant
            for paire in _nxValue:
                _xNominal, _xValue = paire
                self.__addOneMatrixCall()
                _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
            # Functional case: one multi-function call on all the pairs
            self.__addOneMethodCall( len(_nxValue) )
            if self.__extraArgs is None:
                _HxValue = self.__Method( _nxValue ) # MF computation
                _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
        if returnSerieAsArrayMatrix:
            # Stack the series of results column-wise into a 2D array
            _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
        if argsAsSerie: return _HxValue
        else:           return _HxValue[-1]

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
        """
        Returns the operator in matrix form
        """
        # NOTE(review): truncated — the accumulator initialisation and some
        # else-branches (including the one guarding the final raise) are not
        # visible here.
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            mValue = [self.__Matrix,]
        elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
            self.__addOneMethodCall( len(ValueForMethodForm) )
            for _vfmf in ValueForMethodForm:
                mValue.append( self.__Method(((_vfmf, None),)) )
            self.__addOneMethodCall()
            mValue = self.__Method(((ValueForMethodForm, None),))
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
        if argsAsSerie: return mValue
        else:           return mValue[-1]

    # NOTE(review): truncated — the def line of the shape accessor is not
    # visible; its docstring and body are shown below.
        """
        Returns the shape in numpy form if the operator is available as a
        matrix
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Returns the numbers of evaluations of the operator
        """
        # NOTE(review): truncated — the construction of the __nbcalls tuple
        # around these counters is only partially visible.
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Counts one call"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self, nb = 1):
        "Counts one call"
        self.__NbCallsAsMethod += nb # Local count
        Operator.NbCallsAsMethod += nb # Global count

    def __addOneCacheCall(self):
        "Counts one call"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
413 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator type
    (Direct, Linear Tangent, Adjoint)
    """
    # NOTE(review): this excerpt is truncated — the "__init__" def line and
    # several of its keyword arguments (asMatrix, appliedInX, toBeChecked,
    # scheduledBy...), the initialisation of __FO/__Parameters, and many
    # else/raise lines are missing; indentation below is reconstructed and
    # approximate.
        name = "GenericFullOperator",
        asOneFunction = None,    # 1 Function
        asThreeFunctions = None, # 3 Functions in a dictionary
        asScript = None,         # 1 or 3 Function(s) given by script
        asDict = None,           # Parameters
        extraArguments = None,
        performancePrf = None,
        inputAsMF = False,# Function(s) as Multi-Functions
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        self.__extraArgs = extraArguments
        #
        # Merge the user parameters into the internal parameter dictionary
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
        # Priority to EnableMultiProcessingInDerivatives=True
        if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
            __Parameters["EnableMultiProcessingInDerivatives"] = True
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInDerivatives" not in __Parameters:
            __Parameters["EnableMultiProcessingInDerivatives"] = False
        if __Parameters["EnableMultiProcessingInDerivatives"]:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "EnableMultiProcessingInEvaluation" not in __Parameters:
            __Parameters["EnableMultiProcessingInEvaluation"] = False
        if "withIncrement" in __Parameters: # Temporary
            __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
        # The default is equivalent to "ReducedOverallRequirements"
        __reduceM, __avoidRC = True, True
        if performancePrf is not None:
            if performancePrf == "ReducedAmountOfCalculation":
                __reduceM, __avoidRC = False, True
            elif performancePrf == "ReducedMemoryFootprint":
                __reduceM, __avoidRC = True, False
            elif performancePrf == "NoSavings":
                __reduceM, __avoidRC = False, False
        #
        if asScript is not None:
            # Operator(s) imported from an external script file
            __Matrix, __Function = None, None
            # NOTE(review): truncated — the branch selecting a matrix import
            # versus a function import is only partially visible below.
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
                "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
                "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Operator(s) given directly as Python objects
            if asOneFunction is not None:
                # Single "Direct" function: derivatives will be approximated
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # Full triple: explicit Tangent and Adjoint available
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    "The functions has to be given in a dictionnary which have either"+\
                    " 1 key (\"Direct\") or"+\
                    " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # A Tangent without Direct: reuse the tangent as direct
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        #
        # Normalise the "applied in X" precomputed values into a dictionary
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Approximated derivatives: fill in defaults, then build a
            # finite-difference approximation (FDA) of the operator
            if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
            if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
            if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
            if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
            from daCore import NumericObjects
            FDA = NumericObjects.FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["CenteredFiniteDifference"],
                increment = __Function["DifferentialIncrement"],
                dX = __Function["withdX"],
                extraArguments = self.__extraArgs,
                reducingMemoryUse = __Function["withReducingMemoryUse"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lengthOfRedundancy = __Function["withLengthOfRedundancy"],
                mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
                mpWorkers = __Function["NumberOfProcesses"],
                mfEnabled = __Function["withmfEnabled"],
            self.__FO["Direct"] = Operator(
                fromMethod = FDA.DirectOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = FDA.TangentOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = FDA.AdjointOperator,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            # Explicit operator triple: wrap each user function directly
            self.__FO["Direct"] = Operator(
                fromMethod = __Function["Direct"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMethod = __Function["Tangent"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMethod = __Function["Adjoint"],
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                extraArguments = self.__extraArgs )
        elif asMatrix is not None:
            # Linear operator: the tangent is the matrix itself and the
            # adjoint is its transpose
            if isinstance(__Matrix, str):
                __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
            __matrice = numpy.asarray( __Matrix, dtype=float )
            self.__FO["Direct"] = Operator(
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF,
                enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
            self.__FO["Tangent"] = Operator(
                name = self.__name+"Tangent",
                fromMatrix = __matrice,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            self.__FO["Adjoint"] = Operator(
                name = self.__name+"Adjoint",
                fromMatrix = __matrice.T,
                reducingMemoryUse = __reduceM,
                avoidingRedundancy = __avoidRC,
                inputAsMultiFunction = inputAsMF )
            # No usable operator definition was provided
            "The %s object is improperly defined or undefined,"%self.__name+\
            " it requires at minima either a matrix, a Direct operator for"+\
            " approximate derivatives or a Tangent/Adjoint operators pair."+\
            " Please check your operator input.")
        #
        # Store the (possibly string-encoded) precomputed values as columns
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            for key in __appliedInX:
                if isinstance(__appliedInX[key], str):
                    __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
                self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
            self.__FO["AppliedInX"] = None

        "x.__repr__() <==> repr(x)"
        return repr(self.__FO)

        "x.__str__() <==> str(x)"
        return str(self.__FO)
649 # ==============================================================================
650 class Algorithm(object):
652 Classe générale d'interface de type algorithme
654 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
655 d'assimilation, en fournissant un container (dictionnaire) de variables
656 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
658 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
660 def __init__(self, name):
662 L'initialisation présente permet de fabriquer des variables de stockage
663 disponibles de manière générique dans les algorithmes élémentaires. Ces
664 variables de stockage sont ensuite conservées dans un dictionnaire
665 interne à l'objet, mais auquel on accède par la méthode "get".
667 Les variables prévues sont :
668 - APosterioriCorrelations : matrice de corrélations de la matrice A
669 - APosterioriCovariance : matrice de covariances a posteriori : A
670 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
671 - APosterioriVariances : vecteur des variances de la matrice A
672 - Analysis : vecteur d'analyse : Xa
673 - BMA : Background moins Analysis : Xa - Xb
674 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
675 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
676 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
677 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
678 - CostFunctionJo : partie observations de la fonction-coût : Jo
679 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
680 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
681 - CurrentOptimum : état optimal courant lors d'itérations
682 - CurrentState : état courant lors d'itérations
683 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
684 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
685 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
686 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
687 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
688 - Innovation : l'innovation : d = Y - H(X)
689 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
690 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
691 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
692 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
693 - KalmanGainAtOptimum : gain de Kalman à l'optimum
694 - MahalanobisConsistency : indicateur de consistance des covariances
695 - OMA : Observation moins Analyse : Y - Xa
696 - OMB : Observation moins Background : Y - Xb
697 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
698 - ForecastState : état prédit courant lors d'itérations
699 - Residu : dans le cas des algorithmes de vérification
700 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
701 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
702 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
703 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
704 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
705 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
706 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
707 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
708 On peut rajouter des variables à stocker dans l'initialisation de
709 l'algorithme élémentaire qui va hériter de cette classe
711 logging.debug("%s Initialisation", str(name))
712 self._m = PlatformInfo.SystemUsage()
714 self._name = str( name )
715 self._parameters = {"StoreSupplementaryCalculations":[]}
716 self.__internal_state = {}
717 self.__required_parameters = {}
718 self.__required_inputs = {
719 "RequiredInputValues":{"mandatory":(), "optional":()},
720 "ClassificationTags":[],
722 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
723 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
724 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
725 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
727 self.StoredVariables = {}
728 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
729 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
730 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
731 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
732 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
733 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
734 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
735 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
736 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
737 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
738 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
739 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
740 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
741 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
742 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
743 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
744 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
745 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
746 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
747 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
748 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
749 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
750 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
751 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
752 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
753 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
754 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
755 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
756 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
757 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
758 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
759 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
760 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
761 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
762 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
763 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
764 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
765 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
766 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
767 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
768 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
769 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
770 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
771 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
772 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
773 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
775 for k in self.StoredVariables:
776 self.__canonical_stored_name[k.lower()] = k
778 for k, v in self.__variable_names_not_public.items():
779 self.__canonical_parameter_name[k.lower()] = k
780 self.__canonical_parameter_name["algorithm"] = "Algorithm"
781 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# Generic pre-run sequence shared by every algorithm: record start-up
# diagnostics, ingest the Parameters dictionary, then check the input
# vectors (Xb, Y, U), covariances (B, R, Q), operators (HO, EM, CM),
# bounds and the forced initialization point.
# NOTE(review): this extract looks lossy — several "if"/"else:" header
# lines and the trailing return appear to be missing; the leading
# integers are the original file line numbers.
783 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
785 logging.debug("%s Lancement", self._name)
786 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
787 self._getTimeState(reset=True)
789 # Update of the internal parameters with the content of Parameters,
790 # taking back the default values for all the undefined ones
791 self.__setParameters(Parameters, reset=True)
792 for k, v in self.__variable_names_not_public.items():
793 if k not in self._parameters: self.__setParameters( {k:v} )
795 # Corrections and complements of the vectors
796 def __test_vvalue(argument, variable, argname, symbol=None):
# Check one vector argument against the mandatory/optional declarations
797 if symbol is None: symbol = variable
799 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
800 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
801 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
802 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
804 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
806 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
807 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
808 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
809 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
812 "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
813 self._name,argname,symbol,numpy.array(argument).size))
815 __test_vvalue( Xb, "Xb", "Background or initial state" )
816 __test_vvalue( Y, "Y", "Observation" )
817 __test_vvalue( U, "U", "Control" )
819 # Corrections and complements of the covariances
820 def __test_cvalue(argument, variable, argname, symbol=None):
# Same check for the error covariance matrices B, R, Q
821 if symbol is None: symbol = variable
823 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
824 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
825 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
826 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
828 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
830 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
831 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
832 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
833 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
835 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
837 __test_cvalue( B, "B", "Background" )
838 __test_cvalue( R, "R", "Observation" )
839 __test_cvalue( Q, "Q", "Evolution" )
841 # Corrections and complements of the operators
842 def __test_ovalue(argument, variable, argname, symbol=None):
# Same check for the observation/evolution/control operators; an empty
# dict counts as "not set"
843 if symbol is None: symbol = variable
844 if argument is None or (isinstance(argument,dict) and len(argument)==0):
845 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
846 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
847 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
848 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
850 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
852 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
853 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
854 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
855 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
857 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
859 __test_ovalue( HO, "HO", "Observation", "H" )
860 __test_ovalue( EM, "EM", "Evolution", "M" )
861 __test_ovalue( CM, "CM", "Control Model", "C" )
863 # Corrections and complements of the bounds
864 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
865 logging.debug("%s Bounds taken into account"%(self._name,))
867 self._parameters["Bounds"] = None
868 if ("StateBoundsForQuantiles" in self._parameters) \
869 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
870 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
871 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
872 # Caution: unlike Bounds, no default to None, otherwise it is impossible to be without bounds
874 # Corrections and complements of the initialization in X
875 if "InitializationPoint" in self._parameters:
877 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
# A forced initial point must have exactly the background size
878 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
879 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
880 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
881 # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
883 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
885 if self._parameters["InitializationPoint"] is None:
886 raise ValueError("Forced initial point can not be set without any given Background or required value")
888 # Workaround for a TNC bug on the return of the Minimum
889 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
890 self.setParameterValue("StoreInternalVariables",True)
892 # Verbosity and logging
893 if logging.getLogger().level < logging.WARNING:
894 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
895 self._parameters["optmessages"] = 15
897 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
898 self._parameters["optmessages"] = 0
# Generic post-run sequence: derive variances / standard deviations /
# correlations from each stored a posteriori covariance when requested,
# then log operator call counts and timing.
# NOTE(review): some "logging.debug(" opener lines appear elided in this
# extract; leading integers are the original file line numbers.
902 def _post_run(self,_oH=None):
904 if ("StoreSupplementaryCalculations" in self._parameters) and \
905 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
906 for _A in self.StoredVariables["APosterioriCovariance"]:
907 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
# Variances are the diagonal of the covariance matrix
908 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
909 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
910 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
911 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 * A * D^-1/2 with D = diag(A)
912 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
913 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
914 self.StoredVariables["APosterioriCorrelations"].store( _C )
915 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
# Direct/tangent/adjoint evaluation and cache-hit counters
917 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
918 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
920 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
921 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
922 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
923 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
924 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "True if in StoreSupplementaryCalculations, else False"
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
def get(self, key=None):
    """
    Return one of the stored variables identified by its case-insensitive
    key, or the whole dictionary of available variables when no key is
    given. The persistence objects themselves are returned, so individual
    access methods are the ones of the persistence classes.
    """
    if key is None:
        return self.StoredVariables
    canonical = self.__canonical_stored_name[key.lower()]
    return self.StoredVariables[canonical]
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    if key is None:
        return False
    lowered = key.lower()
    if lowered not in self.__canonical_stored_name:
        return False
    return self.__canonical_stored_name[lowered] in self.StoredVariables
def keys(self):
    "D.keys() -> list of D's keys"
    if not hasattr(self, "StoredVariables"):
        # No storage has been initialised yet
        return []
    return self.StoredVariables.keys()
# dict-like pop on the stored variables, with case-insensitive key
# resolution through the canonical-name table.
# NOTE(review): the "def pop(self, k, d):" header and the fallback
# branch lines appear to be missing from this extract.
959 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
960 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
961 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
# Fallback path mimicking dict.pop error behaviour for unknown keys
966 raise TypeError("pop expected at least 1 arguments, got 0")
967 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Entry point of the elementary assimilation computation, to be
    implemented by each concrete algorithm in its most natural
    mathematical form.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Register one required parameter and its characteristics (default,
# typecast, allowed values, bounds, deprecation alias...) in the
# algorithm's declaration tables.
# NOTE(review): most of the keyword-argument signature lines and part of
# the characteristics dict appear elided in this extract.
980 def defineRequiredParameter(self,
992 Allows to define in the algorithm the required parameters and their
993 default characteristics.
996 raise ValueError("A name is mandatory to define a required parameter.")
998 self.__required_parameters[name] = {
1000 "typecast" : typecast,
1003 "listval" : listval,
1004 "listadv" : listadv,
1005 "message" : message,
1006 "oldname" : oldname,
# Canonical (lower-cased) lookup name, plus the deprecated-name aliases
1008 self.__canonical_parameter_name[name.lower()] = name
1009 if oldname is not None:
1010 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1011 self.__replace_by_the_new_name[oldname.lower()] = name
1012 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of the names of the required parameters, or
    directly the dictionary of the required parameters when noDetails is
    False.
    """
    if not noDetails:
        return self.__required_parameters
    return sorted(self.__required_parameters.keys())
# Controlled evaluation of one required parameter: resolve the canonical
# name, apply the declared typecast, then validate against min/max and
# the allowed-value lists before returning the value.
# NOTE(review): some branch headers ("else:", try/except around the
# typecast, the final return) appear elided in this extract.
1024 def setParameterValue(self, name=None, value=None):
1026 Return the value of a required parameter in a controlled way
1028 __k = self.__canonical_parameter_name[name.lower()]
1029 default = self.__required_parameters[__k]["default"]
1030 typecast = self.__required_parameters[__k]["typecast"]
1031 minval = self.__required_parameters[__k]["minval"]
1032 maxval = self.__required_parameters[__k]["maxval"]
1033 listval = self.__required_parameters[__k]["listval"]
1034 listadv = self.__required_parameters[__k]["listadv"]
# Choose the value: explicit value wins, otherwise the declared default
1036 if value is None and default is None:
1038 elif value is None and default is not None:
1039 if typecast is None: __val = default
1040 else: __val = typecast( default )
1042 if typecast is None: __val = value
1045 __val = typecast( value )
1047 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Bound checks are vectorised so they also work on sequences
1049 if minval is not None and (numpy.array(__val, float) < minval).any():
1050 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1051 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1052 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Allowed-value check: element-wise for list/tuple values, scalar otherwise;
# listadv holds advanced (undocumented but accepted) values
1053 if listval is not None or listadv is not None:
1054 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1056 if listval is not None and v in listval: continue
1057 elif listadv is not None and v in listadv: continue
1059 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1060 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1061 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Declare which calculation input arguments are mandatory and which are
    merely optional.
    """
    declared = self.__required_inputs["RequiredInputValues"]
    declared["mandatory"] = tuple(mandatory)
    declared["optional"] = tuple(optional)
def getInputArguments(self):
    """
    Return the two tuples of required input argument names: the mandatory
    ones first, then the optional ones.
    """
    declared = self.__required_inputs["RequiredInputValues"]
    return declared["mandatory"], declared["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tags to the algorithm attributes and return the
    current tag list in every case.
    """
    current = self.__required_inputs["ClassificationTags"]
    for tag in tags:
        current.append(tag)
    return current
# Ingest a parameter dictionary: store raw values, resolve canonical
# names case-insensitively, warn about deprecated parameter names, then
# re-evaluate every declared parameter through setParameterValue.
# NOTE(review): a few structural lines ("if reset:", "else:") appear
# elided in this extract.
1086 def __setParameters(self, fromDico={}, reset=False):
1088 Allows to store the received parameters in the internal dictionary.
1090 self._parameters.update( fromDico )
1091 __inverse_fromDico_keys = {}
1092 for k in fromDico.keys():
1093 if k.lower() in self.__canonical_parameter_name:
1094 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1095 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1096 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Emit a FutureWarning for every deprecated (renamed) parameter used
1098 for k in __inverse_fromDico_keys.values():
1099 if k.lower() in self.__replace_by_the_new_name:
1100 __newk = self.__replace_by_the_new_name[k.lower()]
1101 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1102 __msg += " Please update your code."
1103 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Validate every declared parameter, user-supplied or defaulted
1105 for k in self.__required_parameters.keys():
1106 if k in __canonic_fromDico_keys:
1107 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1109 self._parameters[k] = self.setParameterValue(k)
# Avoid dumping very long values into the debug log
1112 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1113 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1115 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
    """
    Store named variables making up the internal state, either one
    (key, value) pair at a time or as a whole dictionary; reset empties
    the state beforehand.
    """
    if reset:
        # Empty the dictionary beforehand
        self.__internal_state = {}
    if key is not None and value is not None:
        self.__internal_state[key] = value
    for name, content in dict(fromDico).items():
        self.__internal_state[name] = content
def _getInternalState(self, key=None):
    """
    Return the internal state as a dictionary of named variables, or the
    single entry matching key when it is present.
    """
    state = self.__internal_state
    if key is not None and key in state:
        return state[key]
    return state
# CPU/elapsed chronometer: reset stores the reference instants, a plain
# call returns the time spent since the reset, in seconds.
# NOTE(review): the "if reset:"/"else:" header lines appear elided in
# this extract — confirm branch structure against the full file.
1136 def _getTimeState(self, reset=False):
1138 Initialise or return the computation time (cpu/elapsed) in seconds
# Reference instants taken at reset time
1141 self.__initial_cpu_time = time.process_time()
1142 self.__initial_elapsed_time = time.perf_counter()
# Durations measured relative to the stored reference instants
1145 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1146 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1147 return self.__cpu_time, self.__elapsed_time
def _StopOnTimeLimit(self, X=None, withReason=False):
    "Stop criteria on time limit: True/False [+ Reason]"
    cpu, elapsed = self._getTimeState()
    params = self._parameters
    if "MaximumCpuTime" in params and cpu > params["MaximumCpuTime"]:
        stop = True
        reason = "Reached maximum CPU time (%.1fs > %.1fs)"%(cpu, params["MaximumCpuTime"])
    elif "MaximumElapsedTime" in params and elapsed > params["MaximumElapsedTime"]:
        stop = True
        reason = "Reached maximum elapsed time (%.1fs > %.1fs)"%(elapsed, params["MaximumElapsedTime"])
    else:
        stop, reason = False, ""
    if withReason:
        return stop, reason
    return stop
1163 # ==============================================================================
class PartialAlgorithm(object):
    """
    Lightweight stand-in mimicking "Algorithm" from the storage point of
    view, without any advanced action such as checking. The methods kept
    here behave exactly as the ones of the "Algorithm" class.
    """
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations": []}
        #
        self.StoredVariables = {}
        self.StoredVariables["Analysis"]                     = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"]                = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"]               = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"]               = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"]       = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"]            = Persistence.OneIndex(name = "CurrentStepNumber")
        #
        # Case-insensitive lookup table for stored-variable names
        self.__canonical_stored_name = {k.lower(): k for k in self.StoredVariables}

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return the stored variable identified by its case-insensitive key,
        or the dictionary of all stored variables when no key is given.
        The persistence objects themselves are returned, so individual
        access methods are the ones of the persistence classes.
        """
        if key is None:
            return self.StoredVariables
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1203 # ==============================================================================
# General action-interface class binding an algorithm and its parameters;
# the constructor accepts the algorithm either directly or via a script.
# NOTE(review): several keyword-argument signature lines and attribute
# initialisations appear elided in this extract.
1204 class AlgorithmAndParameters(object):
1206 General action-interface class for the algorithm and its parameters
1209 name = "GenericAlgorithm",
1216 self.__name = str(name)
1220 self.__algorithm = {}
1221 self.__algorithmFile = None
1222 self.__algorithmName = None
1224 self.updateParameters( asDict, asScript )
# The algorithm can be given directly or loaded from a script variable
1226 if asAlgorithm is None and asScript is not None:
1227 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1229 __Algo = asAlgorithm
1231 if __Algo is not None:
1232 self.__A = str(__Algo)
1233 self.__P.update( {"Algorithm":self.__A} )
1235 self.__setAlgorithm( self.__A )
1237 self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
# Merge new parameters into the internal dictionary, accepting either a
# dict or a script exposing a "Parameters" variable.
# NOTE(review): the keyword-argument signature lines and the "else:"
# branch appear elided in this extract.
1239 def updateParameters(self,
1243 "Update of the parameters"
1244 if asDict is None and asScript is not None:
1245 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1249 if __Dict is not None:
1250 self.__P.update( dict(__Dict) )
# Launch the assimilation computation in pure Python: unwrap every input
# object from the case dictionary, validate shapes, then run the
# algorithm.
# NOTE(review): the argument lines of the final run(...) call appear
# elided in this extract.
1252 def executePythonScheme(self, asDictAO = None):
1253 "Allows to launch the assimilation calculation"
# Clear the operator calculation cache before a fresh run
1254 Operator.CM.clearCache()
1256 if not isinstance(asDictAO, dict):
1257 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Each user object is unwrapped through getO() when available
1258 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1259 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1260 else: self.__Xb = None
1261 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1262 else: self.__Y = asDictAO["Observation"]
1263 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1264 else: self.__U = asDictAO["ControlInput"]
1265 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1266 else: self.__HO = asDictAO["ObservationOperator"]
1267 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1268 else: self.__EM = asDictAO["EvolutionModel"]
1269 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1270 else: self.__CM = asDictAO["ControlModel"]
1271 self.__B = asDictAO["BackgroundError"]
1272 self.__R = asDictAO["ObservationError"]
1273 self.__Q = asDictAO["EvolutionError"]
# Consistency check of all sizes before launching the computation
1275 self.__shape_validate()
1277 self.__algorithm.run(
1287 Parameters = self.__P,
# Launch the assimilation computation through a YACS XML scheme inside a
# SALOME session: load the schema, check it, then execute it.
# NOTE(review): several lines (imports of pilot/loader, try/except
# headers, run call) appear elided in this extract.
1291 def executeYACSScheme(self, FileName=None):
1292 "Allows to launch the assimilation calculation"
1293 if FileName is None or not os.path.exists(FileName):
1294 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1296 __file = os.path.abspath(FileName)
1297 logging.debug("The YACS file name is \"%s\"."%__file)
# The SALOME/YACS/ADAO environment must be available before execution
1298 if not PlatformInfo.has_salome or \
1299 not PlatformInfo.has_yacs or \
1300 not PlatformInfo.has_adao:
1301 raise ImportError("\n\n"+\
1302 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1303 "Please load the right environnement before trying to use it.\n")
1306 import SALOMERuntime
1308 SALOMERuntime.RuntimeSALOME_setRuntime()
1310 r = pilot.getRuntime()
1311 xmlLoader = loader.YACSLoader()
1312 xmlLoader.registerProcCataLoader()
# Register the schema file as a catalog so its types are resolvable
1314 catalogAd = r.loadCatalog("proc", __file)
1315 r.addCatalog(catalogAd)
1320 p = xmlLoader.load(__file)
1321 except IOError as ex:
1322 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Report parsing, validity and coherency problems before running
1324 logger = p.getLogger("parser")
1325 if not logger.isEmpty():
1326 print("The imported YACS XML schema has errors on parsing:")
1327 print(logger.getStr())
1330 print("The YACS XML schema is not valid and will not be executed:")
1331 print(p.getErrorReport())
1333 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1334 p.checkConsistency(info)
1335 if info.areWarningsOrErrors():
1336 print("The YACS XML schema is not coherent and will not be executed:")
1337 print(info.getGlobalRepr())
1339 e = pilot.ExecutorSwig()
1341 if p.getEffectiveState() != pilot.DONE:
1342 print(p.getErrorReport())
def get(self, key = None):
    """
    Return the stored variable or parameter matching *key*; with no key,
    return the dictionary of public parameters.

    Fix: the original aliased ``allvariables = self.__P`` and then popped
    the non-public names from it, permanently mutating the stored
    parameter dictionary on what should be a read-only accessor. A copy
    is taken instead so repeated calls see the same state.
    """
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
    else:
        # Work on a copy: removing the non-public names must not alter __P
        allvariables = dict(self.__P)
        for k in self.__variable_names_not_public:
            allvariables.pop(k, None)
        return allvariables
def pop(self, k, d):
    "Needed for the pickling machinery; delegates to the algorithm storage"
    removed = self.__algorithm.pop(k, d)
    return removed
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of the parameters required according to the algorithm"
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
def getAlgorithmInputArguments(self):
    "Return the required input arguments according to the algorithm"
    algorithm = self.__algorithm
    return algorithm.getInputArguments()
def getAlgorithmAttributes(self):
    "Return the attribute tags according to the algorithm"
    algorithm = self.__algorithm
    return algorithm.setAttributes()
# Attach a data observer __O (with hook parameters __I and scheduler
# __S) to the stored variable named __V of the chosen algorithm.
# NOTE(review): some arguments of the setDataObserver(...) call appear
# elided in this extract.
1373 def setObserver(self, __V, __O, __I, __S):
# An observer can only be attached once a real algorithm is in place
1374 if self.__algorithm is None \
1375 or isinstance(self.__algorithm, dict) \
1376 or not hasattr(self.__algorithm,"StoredVariables"):
1377 raise ValueError("No observer can be build before choosing an algorithm.")
1378 if __V not in self.__algorithm:
1379 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1381 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1384 HookParameters = __I,
# Detach the data observer __O from the stored variable named __V;
# __A presumably requests removal of all observers — TODO confirm
# against the full file (signature arguments are elided here).
1387 def removeObserver(self, __V, __O, __A = False):
1388 if self.__algorithm is None \
1389 or isinstance(self.__algorithm, dict) \
1390 or not hasattr(self.__algorithm,"StoredVariables"):
1391 raise ValueError("No observer can be removed before choosing an algorithm.")
1392 if __V not in self.__algorithm:
1393 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1395 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
def hasObserver(self, __V):
    "Tell whether the variable named __V currently has a data observer"
    algorithm = self.__algorithm
    unusable = (
        algorithm is None
        or isinstance(algorithm, dict)
        or not hasattr(algorithm, "StoredVariables")
    )
    if unusable:
        return False
    if __V not in algorithm:
        return False
    return algorithm.StoredVariables[ __V ].hasDataObserver()
def keys(self):
    "Return the variable and parameter names, without the non-public ones"
    names = list(self.__algorithm.keys()) + list(self.__P.keys())
    for hidden in self.__variable_names_not_public:
        if hidden in names:
            names.remove(hidden)
    return names
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    return (key in self.__algorithm) or (key in self.__P)
def __repr__(self):
    "x.__repr__() <==> repr(x)"
    return "%s, %s"%(repr(self.__A), repr(self.__P))
def __str__(self):
    "x.__str__() <==> str(x)"
    return "%s, %s"%(self.__A, self.__P)
# Locate the algorithm module named *choice* under a "daAlgorithms"
# directory on sys.path, import it, and instantiate its
# ElementaryAlgorithm class.
# NOTE(review): the "if choice is None:"/try headers and some raise
# openers appear elided in this extract.
1427 def __setAlgorithm(self, choice = None ):
1429 Allows to select the algorithm to be used to carry out the
1430 assimilation study. The argument is a character string referring to
1431 the name of an algorithm performing the operation on the fixed
# arguments.
1434 raise ValueError("Error: algorithm choice has to be given")
# The algorithm choice is definitive for this object
1435 if self.__algorithmName is not None:
1436 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1437 daDirectory = "daAlgorithms"
1439 # Explicitly search for the complete file
1440 # ------------------------------------------
1442 for directory in sys.path:
1443 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1444 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1445 if module_path is None:
1447 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1449 # Import the complete file as a module
1450 # ------------------------------------------
# sys.path is temporarily extended, then restored after the import
1452 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1453 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1454 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1455 raise ImportError("this module does not define a valid elementary algorithm.")
1456 self.__algorithmName = str(choice)
1457 sys.path = sys_path_tmp ; del sys_path_tmp
1458 except ImportError as e:
1460 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1462 # Instantiate an object of the elementary type of the file
1463 # -------------------------------------------------
1464 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Cross-check the sizes of every vector (Xb, Y, U), covariance matrix
# (B, R, Q) and operator (HO, EM, CM) before running the algorithm.
# Each object is first reduced to a shape tuple (None -> zero shape;
# "shape" may be an attribute or a callable), then pairwise
# compatibility conditions are enforced.
# NOTE(review): several "raise ValueError(" opener lines appear elided
# in this extract; leading integers are the original file line numbers.
1467 def __shape_validate(self):
1469 Validation of the correct correspondence of the sizes of the
1470 variables and of the matrices if there are any.
1472 if self.__Xb is None: __Xb_shape = (0,)
1473 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1474 elif hasattr(self.__Xb,"shape"):
1475 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1476 else: __Xb_shape = self.__Xb.shape()
1477 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1479 if self.__Y is None: __Y_shape = (0,)
1480 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1481 elif hasattr(self.__Y,"shape"):
1482 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1483 else: __Y_shape = self.__Y.shape()
1484 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1486 if self.__U is None: __U_shape = (0,)
1487 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1488 elif hasattr(self.__U,"shape"):
1489 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1490 else: __U_shape = self.__U.shape()
1491 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1493 if self.__B is None: __B_shape = (0,0)
1494 elif hasattr(self.__B,"shape"):
1495 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1496 else: __B_shape = self.__B.shape()
1497 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1499 if self.__R is None: __R_shape = (0,0)
1500 elif hasattr(self.__R,"shape"):
1501 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1502 else: __R_shape = self.__R.shape()
1503 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1505 if self.__Q is None: __Q_shape = (0,0)
1506 elif hasattr(self.__Q,"shape"):
1507 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1508 else: __Q_shape = self.__Q.shape()
1509 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1511 if len(self.__HO) == 0: __HO_shape = (0,0)
1512 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1513 elif hasattr(self.__HO["Direct"],"shape"):
1514 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1515 else: __HO_shape = self.__HO["Direct"].shape()
1516 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1518 if len(self.__EM) == 0: __EM_shape = (0,0)
1519 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1520 elif hasattr(self.__EM["Direct"],"shape"):
1521 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1522 else: __EM_shape = self.__EM["Direct"].shape()
1523 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1525 if len(self.__CM) == 0: __CM_shape = (0,0)
1526 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1527 elif hasattr(self.__CM["Direct"],"shape"):
1528 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1529 else: __CM_shape = self.__CM["Direct"].shape()
1530 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1532 # Verification of the conditions
1533 # ---------------------------
# Vectors must be 1D (or degenerate 2D), covariances must be square
1534 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1535 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1536 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1537 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1539 if not( min(__B_shape) == max(__B_shape) ):
1540 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1541 if not( min(__R_shape) == max(__R_shape) ):
1542 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1543 if not( min(__Q_shape) == max(__Q_shape) ):
1544 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1545 if not( min(__EM_shape) == max(__EM_shape) ):
1546 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# The observation operator must map state size to observation size
1548 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1550 "Shape characteristic of observation operator (H)"+\
1551 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1552 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1554 "Shape characteristic of observation operator (H)"+\
1555 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1556 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1558 "Shape characteristic of observation operator (H)"+\
1559 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1560 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1562 "Shape characteristic of observation operator (H)"+\
1563 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: for EnsembleBlue the background may be an ensemble of
# members, reshaped into a persistent vector instead of failing
1565 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1566 if self.__algorithmName in ["EnsembleBlue",]:
1567 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1568 self.__Xb = Persistence.OneVector("Background")
1569 for member in asPersistentVector:
1570 self.__Xb.store( numpy.asarray(member, dtype=float) )
1571 __Xb_shape = min(__B_shape)
1574 "Shape characteristic of a priori errors covariance matrix (B)"+\
1575 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1577 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1579 "Shape characteristic of observation errors covariance matrix (R)"+\
1580 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1582 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1584 "Shape characteristic of evolution model (EM)"+\
1585 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1587 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1589 "Shape characteristic of control model (CM)"+\
1590 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Bounds lists, when given, must provide one pair per state component
1592 if ("Bounds" in self.__P) \
1593 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1594 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1595 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1596 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1598 if ("StateBoundsForQuantiles" in self.__P) \
1599 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1600 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1601 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1602 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1606 # ==============================================================================
# Generic interface class for a regulation algorithm and its parameters
# (translation of the French docstring below). NOTE(review): this span is a
# line-numbered listing with gaps; missing lines (e.g. the "def __init__"
# header and "else:" branches) are not reconstructed here.
1607 class RegulationAndParameters(object):
1609 Classe générale d'interface d'action pour la régulation et ses paramètres
1612 name = "GenericRegulation",
1619 self.__name = str(name)
# Algorithm identifier: loaded from a script when not given directly
# (the alternative assignment on line 1625 belongs to a branch whose
# "else:" line is not visible -- TODO confirm against the full file).
1622 if asAlgorithm is None and asScript is not None:
1623 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1625 __Algo = asAlgorithm
# Parameter dictionary: same script-or-argument pattern.
1627 if asDict is None and asScript is not None:
1628 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge whatever was obtained into the internal parameter store self.__P.
1632 if __Dict is not None:
1633 self.__P.update( dict(__Dict) )
1635 if __Algo is not None:
1636 self.__P.update( {"Algorithm":str(__Algo)} )
# Accessor: returns the stored value for "key" (visible branch only; the
# behaviour for a missing or None key is on lines not shown here).
1638 def get(self, key = None):
1639 "Vérifie l'existence d'une clé de variable ou de paramètres"
1641 return self.__P[key]
1645 # ==============================================================================
# Generic observer interface: attaches a callable to one or several named
# algorithm variables. NOTE(review): gapped listing -- some "else:" lines
# and parts of the constructor signature are missing.
1646 class DataObserver(object):
1648 Classe générale d'interface de type observer
1651 name = "GenericObserver",
1663 self.__name = str(name)
# Normalise the watched variable name(s) into the tuple self.__V and the
# matching info strings into self.__I (one info entry per variable).
1668 if onVariable is None:
1669 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1670 elif type(onVariable) in (tuple, list):
1671 self.__V = tuple(map( str, onVariable ))
1672 if withInfo is None:
1675 self.__I = (str(withInfo),)*len(self.__V)
1676 elif isinstance(onVariable, str):
1677 self.__V = (onVariable,)
1678 if withInfo is None:
1679 self.__I = (onVariable,)
1681 self.__I = (str(withInfo),)
1683 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable: either given directly, or built from a user
# script/template text through UserScript + Observer2Func.
1685 if asObsObject is not None:
1686 self.__O = asObsObject
1688 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1689 __Function = Observer2Func(__FunctionText)
1690 self.__O = __Function.getfunc()
# Register the observer on each requested variable of the algorithm object
# (the assignments of ename/einfo are on lines not shown -- TODO confirm).
1692 for k in range(len(self.__V)):
1695 if ename not in withAlgo:
1696 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1698 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1701 "x.__repr__() <==> repr(x)"
1702 return repr(self.__V)+"\n"+repr(self.__O)
1705 "x.__str__() <==> str(x)"
1706 return str(self.__V)+"\n"+str(self.__O)
1708 # ==============================================================================
# Generic interface holding a user script as text (self.__F), obtained from
# a raw string, a named template, or an external script file.
# NOTE(review): gapped listing -- the assignment for the "asString" branch
# (original line 1724) and the final fallback are not visible.
1709 class UserScript(object):
1711 Classe générale d'interface de type texte de script utilisateur
1714 name = "GenericUserScript",
1721 self.__name = str(name)
# Source selection, in priority order: explicit string, then a known
# template matching the object name, then an imported script file.
1723 if asString is not None:
1725 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1726 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1727 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1728 self.__F = Templates.ObserverTemplates[asTemplate]
1729 elif asScript is not None:
1730 self.__F = Interfaces.ImportFromScript(asScript).getstring()
# repr/str simply expose the stored script text.
1735 "x.__repr__() <==> repr(x)"
1736 return repr(self.__F)
1739 "x.__str__() <==> str(x)"
1740 return str(self.__F)
1742 # ==============================================================================
# Dict-like container for externally supplied parameters (self.__P), with a
# script-loading path. NOTE(review): gapped listing -- several method
# headers (e.g. "def keys", "def items") are among the missing lines.
1743 class ExternalParameters(object):
1745 Classe générale d'interface de type texte de script utilisateur
1748 name = "GenericExternalParameters",
1754 self.__name = str(name)
1757 self.updateParameters( asDict, asScript )
# Merge parameters from a dict argument or, failing that, from a script
# variable named "ExternalParameters".
1759 def updateParameters(self,
1763 "Mise a jour des parametres"
1764 if asDict is None and asScript is not None:
1765 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1769 if __Dict is not None:
1770 self.__P.update( dict(__Dict) )
# Mapping-style accessors delegating to the internal dict self.__P.
1772 def get(self, key = None):
1774 return self.__P[key]
1776 return list(self.__P.keys())
1779 return list(self.__P.keys())
1781 def pop(self, k, d):
1782 return self.__P.pop(k, d)
1785 return self.__P.items()
1787 def __contains__(self, key=None):
1788 "D.__contains__(k) -> True if D has a key k, else False"
1789 return key in self.__P
1791 # ==============================================================================
# Generic state-vector interface: wraps either a single vector (numpy column
# vector in self.__V) or a series of vectors (Persistence.OneVector).
# NOTE(review): gapped, line-numbered listing -- parts of the constructor
# signature, docstring and several "else:"/"raise" lines are missing.
1792 class State(object):
1794 Classe générale d'interface de type état
1797 name = "GenericVector",
1799 asPersistentVector = None,
1805 toBeChecked = False,
1808 Permet de définir un vecteur :
1809 - asVector : entrée des données, comme un vecteur compatible avec le
1810 constructeur de numpy.matrix, ou "True" si entrée par script.
1811 - asPersistentVector : entrée des données, comme une série de vecteurs
1812 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1813 type Persistence, ou "True" si entrée par script.
1814 - asScript : si un script valide est donné contenant une variable
1815 nommée "name", la variable est de type "asVector" (par défaut) ou
1816 "asPersistentVector" selon que l'une de ces variables est placée à
1818 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1819 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1820 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1821 nommée "name"), on récupère les colonnes et on les range ligne après
1822 ligne (colMajor=False, par défaut) ou colonne après colonne
1823 (colMajor=True). La variable résultante est de type "asVector" (par
1824 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1827 self.__name = str(name)
1828 self.__check = bool(toBeChecked)
1832 self.__is_vector = False
1833 self.__is_series = False
# Input acquisition: from a script variable, from a data file (column
# selection + optional transposition), or directly from the arguments.
1835 if asScript is not None:
1836 __Vector, __Series = None, None
1837 if asPersistentVector:
1838 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1840 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1841 elif asDataFile is not None:
1842 __Vector, __Series = None, None
1843 if asPersistentVector:
1844 if colNames is not None:
1845 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1847 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already stored column-major, hence the asymmetric
# transposition conditions below.
1848 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1849 __Series = numpy.transpose(__Series)
1850 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1851 __Series = numpy.transpose(__Series)
1853 if colNames is not None:
1854 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1856 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten either Fortran-order (column major) or C-order (row major).
1858 __Vector = numpy.ravel(__Vector, order = "F")
1860 __Vector = numpy.ravel(__Vector, order = "C")
1862 __Vector, __Series = asVector, asPersistentVector
# Store as a single column vector ...
1864 if __Vector is not None:
1865 self.__is_vector = True
1866 if isinstance(__Vector, str):
1867 __Vector = PlatformInfo.strvect2liststr( __Vector )
1868 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1869 self.shape = self.__V.shape
1870 self.size = self.__V.size
# ... or as a persistent series of vectors.
1871 elif __Series is not None:
1872 self.__is_series = True
1873 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1874 self.__V = Persistence.OneVector(self.__name)
1875 if isinstance(__Series, str):
1876 __Series = PlatformInfo.strmatrix2liststr(__Series)
1877 for member in __Series:
1878 if isinstance(member, str):
1879 member = PlatformInfo.strvect2liststr( member )
1880 self.__V.store(numpy.asarray( member, dtype=float ))
# Persistence objects expose shape as an attribute or a method; both are
# handled, then normalised to a 2-tuple.
1883 if isinstance(self.__V.shape, (tuple, list)):
1884 self.shape = self.__V.shape
1886 self.shape = self.__V.shape()
1887 if len(self.shape) == 1:
1888 self.shape = (self.shape[0],1)
1889 self.size = self.shape[0] * self.shape[1]
1892 "The %s object is improperly defined or undefined,"%self.__name+\
1893 " it requires at minima either a vector, a list/tuple of"+\
1894 " vectors or a persistent object. Please check your vector input.")
# Optional time scheduling attached to the state.
1896 if scheduledBy is not None:
1897 self.__T = scheduledBy
# Accessor for the wrapped object, optionally with its scheduler.
1899 def getO(self, withScheduler=False):
1901 return self.__V, self.__T
1902 elif self.__T is None:
# Internal-type predicates.
1908 "Vérification du type interne"
1909 return self.__is_vector
1912 "Vérification du type interne"
1913 return self.__is_series
1916 "x.__repr__() <==> repr(x)"
1917 return repr(self.__V)
1920 "x.__str__() <==> str(x)"
1921 return str(self.__V)
1923 # ==============================================================================
# Generic covariance interface: one of four internal representations is
# stored in self.__C -- a full matrix, a diagonal vector, a single scalar
# multiplying the identity, or an arbitrary user object exposing the matrix
# protocol. All algebraic methods branch on the representation.
# NOTE(review): gapped, line-numbered listing -- method headers such as
# "def getI", "def getT", "def sqrtm" and several "else:"/"try:" lines are
# among the missing entries; comments below cover only what is visible.
1924 class Covariance(object):
1926 Classe générale d'interface de type covariance
1929 name = "GenericCovariance",
1930 asCovariance = None,
1931 asEyeByScalar = None,
1932 asEyeByVector = None,
1935 toBeChecked = False,
1938 Permet de définir une covariance :
1939 - asCovariance : entrée des données, comme une matrice compatible avec
1940 le constructeur de numpy.matrix
1941 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1942 multiplicatif d'une matrice de corrélation identité, aucune matrice
1943 n'étant donc explicitement à donner
1944 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1945 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1946 n'étant donc explicitement à donner
1947 - asCovObject : entrée des données comme un objet python, qui a les
1948 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1949 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1950 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1951 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1952 pleine doit être vérifié
1954 self.__name = str(name)
1955 self.__check = bool(toBeChecked)
# Representation flags; exactly one becomes True below.
1958 self.__is_scalar = False
1959 self.__is_vector = False
1960 self.__is_matrix = False
1961 self.__is_object = False
# Input acquisition from a script variable (the selecting conditions for
# each branch are on lines not shown) or directly from the arguments.
1963 if asScript is not None:
1964 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1966 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1968 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1970 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1972 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1974 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar representation: a single positive value multiplying the identity.
1976 if __Scalar is not None:
1977 if isinstance(__Scalar, str):
1978 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1979 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1980 if numpy.array(__Scalar).size != 1:
1982 " The diagonal multiplier given to define a sparse matrix is"+\
1983 " not a unique scalar value.\n Its actual measured size is"+\
1984 " %i. Please check your scalar input."%numpy.array(__Scalar).size)
1985 self.__is_scalar = True
1986 self.__C = numpy.abs( float(__Scalar) )
# Vector representation: the diagonal of the covariance matrix.
1989 elif __Vector is not None:
1990 if isinstance(__Vector, str):
1991 __Vector = PlatformInfo.strvect2liststr( __Vector )
1992 self.__is_vector = True
1993 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1994 self.shape = (self.__C.size,self.__C.size)
1995 self.size = self.__C.size**2
# Full-matrix representation.
1996 elif __Matrix is not None:
1997 self.__is_matrix = True
1998 self.__C = numpy.matrix( __Matrix, float )
1999 self.shape = self.__C.shape
2000 self.size = self.__C.size
# Object representation: duck-typed; required protocol is checked here.
2001 elif __Object is not None:
2002 self.__is_object = True
2004 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
2005 if not hasattr(self.__C,at):
2006 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
2007 if hasattr(self.__C,"shape"):
2008 self.shape = self.__C.shape
2011 if hasattr(self.__C,"size"):
2012 self.size = self.__C.size
# Sanity checks: squareness and positive-definiteness (the latter only when
# requested or when logging is verbose, since Cholesky can be expensive).
2020 def __validate(self):
2022 if self.__C is None:
2023 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
2024 if self.ismatrix() and min(self.shape) != max(self.shape):
2025 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
2026 if self.isobject() and min(self.shape) != max(self.shape):
2027 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
2028 if self.isscalar() and self.__C <= 0:
2029 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
2030 if self.isvector() and (self.__C <= 0).any():
2031 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
2032 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
2034 numpy.linalg.cholesky( self.__C )
2036 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
2037 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
2041 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Internal-type predicates.
2044 "Vérification du type interne"
2045 return self.__is_scalar
2048 "Vérification du type interne"
2049 return self.__is_vector
2052 "Vérification du type interne"
2053 return self.__is_matrix
2056 "Vérification du type interne"
2057 return self.__is_object
# Inverse (getI): each representation inverts in its own cheapest way.
2062 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
2063 elif self.isvector():
2064 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
2065 elif self.isscalar():
2066 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
2067 elif self.isobject() and hasattr(self.__C,"getI"):
2068 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
2070 return None # Indispensable
# Transpose (getT): diagonal and scalar forms are symmetric by nature.
2075 return Covariance(self.__name+"T", asCovariance = self.__C.T )
2076 elif self.isvector():
2077 return Covariance(self.__name+"T", asEyeByVector = self.__C )
2078 elif self.isscalar():
2079 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
2080 elif self.isobject() and hasattr(self.__C,"getT"):
2081 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
2083 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
# Cholesky factor: reduces to an element-wise sqrt for diagonal/scalar forms.
2086 "Décomposition de Cholesky"
2088 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
2089 elif self.isvector():
2090 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2091 elif self.isscalar():
2092 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2093 elif self.isobject() and hasattr(self.__C,"cholesky"):
2094 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
2096 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
# Inverse of the Cholesky factor.
2098 def choleskyI(self):
2099 "Inversion de la décomposition de Cholesky"
2101 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
2102 elif self.isvector():
2103 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2104 elif self.isscalar():
2105 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2106 elif self.isobject() and hasattr(self.__C,"choleskyI"):
2107 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
2109 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
# Matrix square root via scipy.linalg.sqrtm (real part kept for the full
# matrix case); element-wise sqrt otherwise.
2112 "Racine carrée matricielle"
2115 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2116 elif self.isvector():
2117 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2118 elif self.isscalar():
2119 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2120 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2121 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2123 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
# Inverse of the matrix square root.
2126 "Inversion de la racine carrée matricielle"
2129 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2130 elif self.isvector():
2131 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2132 elif self.isscalar():
2133 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2134 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2135 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2137 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
# Diagonal extraction; the scalar form needs an explicit size (msize).
2139 def diag(self, msize=None):
2140 "Diagonale de la matrice"
2142 return numpy.diag(self.__C)
2143 elif self.isvector():
2145 elif self.isscalar():
2147 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2149 return self.__C * numpy.ones(int(msize))
2150 elif self.isobject() and hasattr(self.__C,"diag"):
2151 return self.__C.diag()
2153 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
# Trace; again msize is mandatory for the scalar form.
2155 def trace(self, msize=None):
2156 "Trace de la matrice"
2158 return numpy.trace(self.__C)
2159 elif self.isvector():
2160 return float(numpy.sum(self.__C))
2161 elif self.isscalar():
2163 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2165 return self.__C * int(msize)
2166 elif self.isobject():
2167 return self.__C.trace()
2169 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
# Densification to a plain float ndarray.
2171 def asfullmatrix(self, msize=None):
2174 return numpy.asarray(self.__C, dtype=float)
2175 elif self.isvector():
2176 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2177 elif self.isscalar():
2179 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2181 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2182 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2183 return self.__C.asfullmatrix()
2185 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2187 def assparsematrix(self):
2195 "x.__repr__() <==> repr(x)"
2196 return repr(self.__C)
2199 "x.__str__() <==> str(x)"
2200 return str(self.__C)
# Addition: for diagonal/scalar forms, only the other operand's diagonal
# entries are modified in place (strided view trick on the flattened array).
2202 def __add__(self, other):
2203 "x.__add__(y) <==> x+y"
2204 if self.ismatrix() or self.isobject():
2205 return self.__C + numpy.asmatrix(other)
2206 elif self.isvector() or self.isscalar():
2207 _A = numpy.asarray(other)
2208 if len(_A.shape) == 1:
2209 _A.reshape((-1,1))[::2] += self.__C
2211 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2212 return numpy.asmatrix(_A)
2214 def __radd__(self, other):
2215 "x.__radd__(y) <==> y+x"
2216 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
# Subtraction: same diagonal-only strategy for diagonal/scalar forms.
2218 def __sub__(self, other):
2219 "x.__sub__(y) <==> x-y"
2220 if self.ismatrix() or self.isobject():
2221 return self.__C - numpy.asmatrix(other)
2222 elif self.isvector() or self.isscalar():
2223 _A = numpy.asarray(other)
2224 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2225 return numpy.asmatrix(_A)
2227 def __rsub__(self, other):
2228 "x.__rsub__(y) <==> y-x"
2229 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2232 "x.__neg__() <==> -x"
# Matrix-multiplication operator (@): dispatches on the representation of
# self and on whether "other" looks like a vector or a matrix.
2235 def __matmul__(self, other):
2236 "x.__mul__(y) <==> x@y"
2237 if self.ismatrix() and isinstance(other, (int, float)):
2238 return numpy.asarray(self.__C) * other
2239 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2240 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2241 return numpy.ravel(self.__C @ numpy.ravel(other))
2242 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2243 return numpy.asarray(self.__C) @ numpy.asarray(other)
2245 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2246 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2247 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2248 return numpy.ravel(self.__C) * numpy.ravel(other)
2249 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2250 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2252 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2253 elif self.isscalar() and isinstance(other,numpy.matrix):
2254 return numpy.asarray(self.__C * other)
2255 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2256 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2257 return self.__C * numpy.ravel(other)
2259 return self.__C * numpy.asarray(other)
2260 elif self.isobject():
2261 return self.__C.__matmul__(other)
2263 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
# Legacy * operator: same dispatch, numpy.matrix-based results.
2265 def __mul__(self, other):
2266 "x.__mul__(y) <==> x*y"
2267 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2268 return self.__C * other
2269 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2270 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2271 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2272 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2273 return self.__C * numpy.asmatrix(other)
2276 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2277 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2278 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2279 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2280 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2281 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2284 "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2285 elif self.isscalar() and isinstance(other,numpy.matrix):
2286 return self.__C * other
2287 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2288 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2289 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2291 return self.__C * numpy.asmatrix(other)
2292 elif self.isobject():
2293 return self.__C.__mul__(other)
2295 raise NotImplementedError(
2296 "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
# Right-hand @ operator (y @ x).
2298 def __rmatmul__(self, other):
2299 "x.__rmul__(y) <==> y@x"
2300 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2301 return other * self.__C
2302 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2303 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2304 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2305 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2306 return numpy.asmatrix(other) * self.__C
2309 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2310 elif self.isvector() and isinstance(other,numpy.matrix):
2311 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2312 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2313 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2314 return numpy.asmatrix(numpy.array(other) * self.__C)
2317 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2318 elif self.isscalar() and isinstance(other,numpy.matrix):
2319 return other * self.__C
2320 elif self.isobject():
2321 return self.__C.__rmatmul__(other)
2323 raise NotImplementedError(
2324 "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
# Right-hand * operator (y * x); mirrors __rmatmul__ with an extra
# scalar*float branch.
2326 def __rmul__(self, other):
2327 "x.__rmul__(y) <==> y*x"
2328 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2329 return other * self.__C
2330 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2331 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2332 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2333 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2334 return numpy.asmatrix(other) * self.__C
2337 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2338 elif self.isvector() and isinstance(other,numpy.matrix):
2339 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2340 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2341 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2342 return numpy.asmatrix(numpy.array(other) * self.__C)
2345 "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2346 elif self.isscalar() and isinstance(other,numpy.matrix):
2347 return other * self.__C
2348 elif self.isscalar() and isinstance(other,float):
2349 return other * self.__C
2350 elif self.isobject():
2351 return self.__C.__rmul__(other)
2353 raise NotImplementedError(
2354 "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# len(x) is the leading dimension.
2357 "x.__len__() <==> len(x)"
2358 return self.shape[0]
2360 # ==============================================================================
# Builds an observer callable from its source text (self.__corps).
# NOTE(review): gapped listing -- the line executing the stored text inside
# func() and the "def getfunc" header are among the missing entries, so the
# execution mechanism cannot be confirmed from this excerpt.
2361 class Observer2Func(object):
2363 Création d'une fonction d'observateur a partir de son texte
2365 def __init__(self, corps=""):
2366 self.__corps = corps
2367 def func(self,var,info):
2368 "Fonction d'observation"
2371 "Restitution du pointeur de fonction dans l'objet"
2374 # ==============================================================================
# Records the sequence of commands that created a case (self.__logSerie) and
# can dump/reload it through pluggable viewer/loader formatters.
# NOTE(review): gapped listing -- the dict-literal open/close lines for
# self.__viewers and self.__loaders, and some method headers, are missing.
2375 class CaseLogger(object):
2377 Conservation des commandes de création d'un cas
2379 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2380 self.__name = str(__name)
2381 self.__objname = str(__objname)
2382 self.__logSerie = []
2383 self.__switchoff = False
# Built-in output formatters (entries of the viewers mapping).
2385 "TUI" :Interfaces._TUIViewer,
2386 "SCD" :Interfaces._SCDViewer,
2387 "YACS":Interfaces._YACSViewer,
2388 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2389 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2390 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formatters (entries of the loaders mapping).
2393 "TUI" :Interfaces._TUIViewer,
2394 "COM" :Interfaces._COMViewer,
# Caller-supplied formatters extend the built-in maps.
2396 if __addViewers is not None:
2397 self.__viewers.update(dict(__addViewers))
2398 if __addLoaders is not None:
2399 self.__loaders.update(dict(__addLoaders))
# Append one command to the log unless recording is switched off; "self"
# is stripped from the recorded keys.
2401 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2402 "Enregistrement d'une commande individuelle"
2403 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2404 if "self" in __keys: __keys.remove("self")
2405 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2407 self.__switchoff = True
2409 self.__switchoff = False
# Serialize the recorded commands with the requested viewer format.
2411 def dump(self, __filename=None, __format="TUI", __upa=""):
2412 "Restitution normalisée des commandes"
2413 if __format in self.__viewers:
2414 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2416 raise ValueError("Dumping as \"%s\" is not available"%__format)
2417 return __formater.dump(__filename, __upa)
# Reload commands with the requested loader format.
2419 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2420 "Chargement normalisé des commandes"
2421 if __format in self.__loaders:
2422 __formater = self.__loaders[__format]()
2424 raise ValueError("Loading as \"%s\" is not available"%__format)
2425 return __formater.load(__filename, __content, __object)
2427 # ==============================================================================
# NOTE(review): visible tail of the MultiFonction helper -- its "def" line
# precedes the first visible entry below (only trailing keyword parameters
# are shown). For an ordered series of input vectors it returns the series
# of function values, optionally evaluated with a multiprocessing pool.
2430 _extraArguments = None,
2431 _sFunction = lambda x: x,
2436 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2437 correspondante de valeurs de la fonction en argument
2439 # Vérifications et définitions initiales
2440 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2441 if not PlatformInfo.isIterable( __xserie ):
2442 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: auto when unset/invalid, otherwise the caller's value.
2444 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2447 __mpWorkers = int(_mpWorkers)
2449 import multiprocessing
# Parallel evaluation path (_jobs is prepared on lines not shown here).
2460 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2461 with multiprocessing.Pool(__mpWorkers) as pool:
2462 __multiHX = pool.map( _sFunction, _jobs )
2465 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential fallback: extra arguments may be positional (list/tuple/map)
# or keyword (dict); any other type is rejected.
2467 # logging.debug("MULTF Internal monoprocessing calculation begin")
2469 if _extraArguments is None:
2470 for __xvalue in __xserie:
2471 __multiHX.append( _sFunction( __xvalue ) )
2472 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2473 for __xvalue in __xserie:
2474 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2475 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2476 for __xvalue in __xserie:
2477 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2479 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2480 # logging.debug("MULTF Internal monoprocessing calculation end")
2482 # logging.debug("MULTF Internal multifonction calculations end")
2485 # ==============================================================================
# Self-test entry point; the module prints a banner when run directly
# (any further autodiagnostic code lies beyond this excerpt).
2486 if __name__ == "__main__":
2487 print('\n AUTODIAGNOSTIC\n')