1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2023 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
36 from functools import partial
37 from daCore import Persistence, PlatformInfo, Interfaces
38 from daCore import Templates
40 # ==============================================================================
41 class CacheManager(object):
# NOTE(review): this SOURCE is a decimated, line-number-prefixed dump: the
# leading integers are original file line numbers, and gaps in them show that
# lines were dropped (e.g. the "def __init__" header and the triple-quote
# lines of the docstrings below are missing).  Code is kept byte-identical;
# only comments are added and French docstrings translated.
#
# Purpose: general manager for a cache of operator evaluations.  It stores
# (point, value, norm, operator-name) tuples so that a redundant evaluation
# at an already-seen point can be answered from the cache.
43 Classe générale de gestion d'un cache de calculs
# --- constructor fragment (its "def __init__(self, ...)" line is absent) ---
# toleranceInRedundancy : relative tolerance used to decide that a new point
#                         equals a cached one (scaled by the cached norm)
# lengthOfRedundancy    : maximum number of cached pairs; -1 means
#                         "auto-size on first store" (see storeValueInX)
46 toleranceInRedundancy = 1.e-18,
47 lengthOfRedundancy = -1,
50 Les caractéristiques de tolérance peuvent être modifiées à la création.
52 self.__tolerBP = float(toleranceInRedundancy)
53 self.__lengthOR = int(lengthOfRedundancy)
# Remember the initial cache length so it can be restored after a disable().
54 self.__initlnOR = self.__lengthOR
64 def wasCalculatedIn(self, xValue, oName="" ):
# Search backwards (most recent first) through the cached entries for a
# point matching xValue for the operator named oName.  Each cached entry is
# the 4-tuple (point, value, norm, operator name) built in storeValueInX.
# NOTE(review): the lines initialising the result variables, the bodies of
# the if/elif screening branches and the final return statement are among
# the dropped lines — TODO recover them from the original file.
65 "Check for the existence of a cached computation matching the value"
69 for i in range(min(len(self.__listOPCV),self.__lengthOR)-1,-1,-1):
# Cheap rejection tests first: the candidate must be array-like, belong to
# the same operator, and have the same size as the cached point.
70 if not hasattr(xValue, 'size'):
72 elif (str(oName) != self.__listOPCV[i][3]):
74 elif (xValue.size != self.__listOPCV[i][0].size):
# First-component screen before paying for a full distance computation.
76 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: distance to the cached point below tolerance scaled by norm.
78 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
80 __HxV = self.__listOPCV[i][1]
84 def storeValueInX(self, xValue, HxValue, oName="" ):
85 "Store, for an operator o, a computation Hx matching the value x"
# Lazy auto-sizing: on the first store (lengthOR < 0) size the cache from
# the dimension of x, capped at 50 (i.e. 2*min(size,50)+2 entries).
86 if self.__lengthOR < 0:
87 self.__lengthOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
88 self.__initlnOR = self.__lengthOR
89 self.__seenNames.append(str(oName))
90 if str(oName) not in self.__seenNames: # Extend the list if the name is new
91 self.__lengthOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
92 self.__initlnOR += self.__lengthOR
93 self.__seenNames.append(str(oName))
# FIFO eviction: drop the oldest entries until the cache fits its bound.
94 while len(self.__listOPCV) > self.__lengthOR:
95 self.__listOPCV.pop(0)
96 self.__listOPCV.append( (
97 copy.copy(numpy.ravel(xValue)), # 0 Previous point
98 copy.copy(HxValue), # 1 Previous value
99 numpy.linalg.norm(xValue), # 2 Norm
100 str(oName), # 3 Operator name
# NOTE(review): the closing ") )" of this append(...) call is among the
# dropped lines.
# --- disable()/enable() fragments (their "def" lines are absent) ---
# disable(): save the current length, then turn the cache off;
105 self.__initlnOR = self.__lengthOR
107 self.__enabled = False
# enable(): restore the saved length and turn the cache back on.
111 self.__lengthOR = self.__initlnOR
112 self.__enabled = True
114 # ==============================================================================
115 class Operator(object):
# NOTE(review): decimated, line-number-prefixed dump — the leading integers
# are original line numbers and gaps mark dropped lines (docstring quotes,
# "else:" lines, some "def" headers).  Code is kept byte-identical; only
# comments are added and French comments/docstrings translated.
#
# Purpose: generic interface for a simple operator, defined either by a
# Python (multi-)function (self.__Method) or by a matrix (self.__Matrix).
# Class-level counters and a shared CacheManager (Operator.CM — its
# declaration is on a dropped line) are used to count evaluations and to
# avoid redundant ones.
117 Classe générale d'interface de type opérateur simple
# --- constructor fragment (its "def __init__(self, ...)" line is absent) ---
125 name = "GenericOperator",
128 avoidingRedundancy = True,
129 reducingMemoryUse = False,
130 inputAsMultiFunction = False,
131 enableMultiProcess = False,
132 extraArguments = None,
# Constructor docstring fragment (French, its quotes were dropped): the
# object is built from either a Python function / multi-function
# (fromMethod) or a matrix (fromMatrix); the booleans toggle redundancy
# avoidance, reduced memory use and multi-function input; extraArguments is
# passed through to the base function and its derivatives.
135 On construit un objet de ce type en fournissant, à l'aide de l'un des
136 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
139 - name : nom d'opérateur
140 - fromMethod : argument de type fonction Python
141 - fromMatrix : argument adapté au constructeur numpy.array/matrix
142 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
143 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
145 - inputAsMultiFunction : booléen indiquant une fonction explicitement
146 définie (ou pas) en multi-fonction
147 - extraArguments : arguments supplémentaires passés à la fonction de
148 base et ses dérivées (tuple ou dictionnaire)
150 self.__name = str(name)
# Per-instance call counters: as-matrix, as-method, served-from-cache.
151 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
152 self.__reduceM = bool( reducingMemoryUse )
153 self.__avoidRC = bool( avoidingRedundancy )
154 self.__inputAsMF = bool( inputAsMultiFunction )
155 self.__mpEnabled = bool( enableMultiProcess )
156 self.__extraArgs = extraArguments
# A user multi-function is stored as-is; a plain mono-function is wrapped by
# MultiFonction (defined elsewhere in this file) to expose a serie API.
157 if fromMethod is not None and self.__inputAsMF:
158 self.__Method = fromMethod # logtimer(fromMethod)
160 self.__Type = "Method"
161 elif fromMethod is not None and not self.__inputAsMF:
162 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
164 self.__Type = "Method"
165 elif fromMatrix is not None:
# A matrix given as a string is parsed first, then stored as a float array.
167 if isinstance(fromMatrix, str):
168 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
169 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
170 self.__Type = "Matrix"
# Turn the shared redundancy cache off for all Operator instances.
176 def disableAvoidingRedundancy(self):
178 Operator.CM.disable()
# Turn the shared redundancy cache back on.
# NOTE(review): the CM.disable() call below is most likely the "else" branch
# of a dropped "if self.__avoidRC: Operator.CM.enable()" conditional — as
# shown it contradicts the method name; TODO confirm against the original.
180 def enableAvoidingRedundancy(self):
185 Operator.CM.disable()
191 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
# Apply the operator to a serie of arguments xValue, optionally reusing the
# precomputed values HValue, the redundancy cache, or the matrix form.
193 Permet de restituer le résultat de l'application de l'opérateur à une
194 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
195 argument devant a priori être du bon type.
197 - les arguments par série sont :
198 - xValue : argument adapté pour appliquer l'opérateur
199 - HValue : valeur précalculée de l'opérateur en ce point
200 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# If HValue is supplied, reuse it directly instead of evaluating.
207 if HValue is not None:
211 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
213 if _HValue is not None:
214 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
216 for i in range(len(_HValue)):
217 _HxValue.append( _HValue[i] )
# Feed the (x, Hx) pair into the shared cache for later reuse.
219 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise evaluate each point: ask the cache first, then the matrix form,
# and batch the remaining points (_xserie) into one multi-function call.
224 for i, xv in enumerate(_xValue):
226 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
228 __alreadyCalculated = False
230 if __alreadyCalculated:
231 self.__addOneCacheCall()
234 if self.__Matrix is not None:
235 self.__addOneMatrixCall()
236 _hv = self.__Matrix @ numpy.ravel(xv)
238 self.__addOneMethodCall()
242 _HxValue.append( _hv )
# One batched multi-function call for every point not served by the cache.
244 if len(_xserie)>0 and self.__Matrix is None:
245 if self.__extraArgs is None:
246 _hserie = self.__Method( _xserie ) # MF computation
248 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
# A multi-function must return a sequence; "pop" is used as a duck test.
249 if not hasattr(_hserie, "pop"):
251 "The user input multi-function doesn't seem to return a"+\
252 " result sequence, behaving like a mono-function. It has"+\
260 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally stack the results column-wise into a single 2D array.
262 if returnSerieAsArrayMatrix:
263 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
265 if argsAsSerie: return _HxValue
266 else: return _HxValue[-1]
268 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
# Apply the operator to (xValue, uValue) pairs; when uValue is None the
# operator is applied to xValue alone.
270 Permet de restituer le résultat de l'application de l'opérateur à des
271 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
272 argument devant a priori être du bon type. Si la uValue est None,
273 on suppose que l'opérateur ne s'applique qu'à xValue.
275 - paires : les arguments par paire sont :
276 - xValue : argument X adapté pour appliquer l'opérateur
277 - uValue : argument U adapté pour appliquer l'opérateur
278 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
280 if argsAsSerie: _xuValue = paires
281 else: _xuValue = (paires,)
282 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control U is ignored, only X is multiplied.
284 if self.__Matrix is not None:
286 for paire in _xuValue:
287 _xValue, _uValue = paire
288 self.__addOneMatrixCall()
289 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: pass the full pair when U is set, X alone otherwise.
292 for paire in _xuValue:
293 _xValue, _uValue = paire
294 if _uValue is not None:
295 _xuArgs.append( paire )
297 _xuArgs.append( _xValue )
298 self.__addOneMethodCall( len(_xuArgs) )
299 if self.__extraArgs is None:
300 _HxValue = self.__Method( _xuArgs ) # MF computation
302 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
304 if returnSerieAsArrayMatrix:
305 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
307 if argsAsSerie: return _HxValue
308 else: return _HxValue[-1]
310 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
# Apply the operator, valid at the nominal point xNominal, to xValue pairs.
# For a matrix (linear) operator the nominal point is irrelevant: only the
# xValue component of each pair is used.
312 Permet de restituer le résultat de l'application de l'opérateur à une
313 série d'arguments xValue, sachant que l'opérateur est valable en
314 xNominal. Cette méthode se contente d'appliquer, son argument devant a
315 priori être du bon type. Si l'opérateur est linéaire car c'est une
316 matrice, alors il est valable en tout point nominal et xNominal peut
317 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
318 permet d'indiquer que l'argument est multi-paires.
320 - paires : les arguments par paire sont :
321 - xNominal : série d'arguments permettant de donner le point où
322 l'opérateur est construit pour être ensuite appliqué
323 - xValue : série d'arguments adaptés pour appliquer l'opérateur
324 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
326 if argsAsSerie: _nxValue = paires
327 else: _nxValue = (paires,)
328 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
330 if self.__Matrix is not None:
332 for paire in _nxValue:
333 _xNominal, _xValue = paire
334 self.__addOneMatrixCall()
335 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function form: the whole (xNominal, xValue) serie goes to the method.
337 self.__addOneMethodCall( len(_nxValue) )
338 if self.__extraArgs is None:
339 _HxValue = self.__Method( _nxValue ) # MF computation
341 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
343 if returnSerieAsArrayMatrix:
344 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
346 if argsAsSerie: return _HxValue
347 else: return _HxValue[-1]
349 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
# Return the operator in matrix form; a function-defined operator needs an
# operating point (ValueForMethodForm) at which the matrix is evaluated.
351 Permet de renvoyer l'opérateur sous la forme d'une matrice
353 if self.__Matrix is not None:
354 self.__addOneMatrixCall()
355 mValue = [self.__Matrix,]
# The string "UnknownVoidValue" is a sentinel meaning "no point supplied"
# (deliberately not None, which could be a legitimate user value).
356 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
359 self.__addOneMethodCall( len(ValueForMethodForm) )
360 for _vfmf in ValueForMethodForm:
361 mValue.append( self.__Method(((_vfmf, None),)) )
363 self.__addOneMethodCall()
364 mValue = self.__Method(((ValueForMethodForm, None),))
366 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
368 if argsAsSerie: return mValue
369 else: return mValue[-1]
# --- shape accessor fragment (its "def" line is absent) ---
# Returns the numpy shape when the operator is available in matrix form,
# otherwise raises.
373 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
374 la forme d'une matrice
376 if self.__Matrix is not None:
377 return self.__Matrix.shape
379 raise ValueError("Matrix form of the operator is not available, nor the shape")
381 def nbcalls(self, which=None):
# Return the evaluation counters: instance-level totals first, then the
# class-level (all instances) totals; "which" selects one entry of the
# tuple, None returns them all.
383 Renvoie les nombres d'évaluations de l'opérateur
386 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
387 self.__NbCallsAsMatrix,
388 self.__NbCallsAsMethod,
389 self.__NbCallsOfCached,
390 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
391 Operator.NbCallsAsMatrix,
392 Operator.NbCallsAsMethod,
393 Operator.NbCallsOfCached,
395 if which is None: return __nbcalls
396 else: return __nbcalls[which]
398 def __addOneMatrixCall(self):
399 "Count one call"
400 self.__NbCallsAsMatrix += 1 # Local count
401 Operator.NbCallsAsMatrix += 1 # Global count
403 def __addOneMethodCall(self, nb = 1):
404 "Count one call"
405 self.__NbCallsAsMethod += nb # Local count
406 Operator.NbCallsAsMethod += nb # Global count
408 def __addOneCacheCall(self):
409 "Count one call"
410 self.__NbCallsOfCached += 1 # Local count
411 Operator.NbCallsOfCached += 1 # Global count
413 # ==============================================================================
414 class FullOperator(object):
# NOTE(review): decimated, line-number-prefixed dump — the leading integers
# are original line numbers; gaps mark dropped lines (the "def __init__"
# header, "else:"/"raise" lines, docstring quotes).  Code is kept
# byte-identical; only comments are added and French comments translated.
#
# Purpose: generic interface for a full operator triple (Direct, Tangent
# linear, Adjoint), stored in self.__FO and built from one function, three
# functions, a script, or a matrix.
416 Classe générale d'interface de type opérateur complet
417 (Direct, Linéaire Tangent, Adjoint)
# --- constructor fragment (its "def __init__(self, ...)" line is absent) ---
420 name = "GenericFullOperator",
422 asOneFunction = None, # 1 Function
423 asThreeFunctions = None, # 3 Functions in a dictionary
424 asScript = None, # 1 or 3 Function(s) by script
425 asDict = None, # Parameters
427 extraArguments = None,
428 performancePrf = None,
429 inputAsMF = False,# Function(s) as Multi-Functions
434 self.__name = str(name)
435 self.__check = bool(toBeChecked)
436 self.__extraArgs = extraArguments
# Merge the user parameter dictionary, then normalise the multiprocessing
# switches: derivatives and evaluation MP are mutually exclusive.
441 if (asDict is not None) and isinstance(asDict, dict):
442 __Parameters.update( asDict )
443 # Priority given to EnableMultiProcessingInDerivatives=True
444 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
445 __Parameters["EnableMultiProcessingInDerivatives"] = True
446 __Parameters["EnableMultiProcessingInEvaluation"] = False
447 if "EnableMultiProcessingInDerivatives" not in __Parameters:
448 __Parameters["EnableMultiProcessingInDerivatives"] = False
449 if __Parameters["EnableMultiProcessingInDerivatives"]:
450 __Parameters["EnableMultiProcessingInEvaluation"] = False
451 if "EnableMultiProcessingInEvaluation" not in __Parameters:
452 __Parameters["EnableMultiProcessingInEvaluation"] = False
# Legacy alias: "withIncrement" maps to "DifferentialIncrement".
453 if "withIncrement" in __Parameters: # Temporary
454 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
455 # The default is equivalent to "ReducedOverallRequirements"
456 __reduceM, __avoidRC = True, True
457 if performancePrf is not None:
458 if performancePrf == "ReducedAmountOfCalculation":
459 __reduceM, __avoidRC = False, True
460 elif performancePrf == "ReducedMemoryFootprint":
461 __reduceM, __avoidRC = True, False
462 elif performancePrf == "NoSavings":
463 __reduceM, __avoidRC = False, False
# Script input: import either a matrix or the Direct/Tangent/Adjoint
# functions from the given script file.
465 if asScript is not None:
466 __Matrix, __Function = None, None
468 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
470 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
471 __Function.update({"useApproximatedDerivatives":True})
472 __Function.update(__Parameters)
473 elif asThreeFunctions:
475 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
476 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
477 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
479 __Function.update(__Parameters)
# Direct-function input: either given directly or in a {"Direct": f} dict;
# derivatives will then be approximated by finite differences.
482 if asOneFunction is not None:
483 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
484 if asOneFunction["Direct"] is not None:
485 __Function = asOneFunction
487 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
489 __Function = { "Direct":asOneFunction }
490 __Function.update({"useApproximatedDerivatives":True})
491 __Function.update(__Parameters)
# Three-functions input: exact Tangent/Adjoint given, or Direct alone with
# approximated derivatives; a missing Direct falls back to the Tangent.
492 elif asThreeFunctions is not None:
493 if isinstance(asThreeFunctions, dict) and \
494 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
495 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
496 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
497 __Function = asThreeFunctions
498 elif isinstance(asThreeFunctions, dict) and \
499 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
500 __Function = asThreeFunctions
501 __Function.update({"useApproximatedDerivatives":True})
504 "The functions has to be given in a dictionnary which have either"+\
505 " 1 key (\"Direct\") or"+\
506 " 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
507 if "Direct" not in asThreeFunctions:
508 __Function["Direct"] = asThreeFunctions["Tangent"]
509 __Function.update(__Parameters)
# Normalise the optional operating point(s): a bare value becomes {"HXb": v}.
513 if appliedInX is not None and isinstance(appliedInX, dict):
514 __appliedInX = appliedInX
515 elif appliedInX is not None:
516 __appliedInX = {"HXb":appliedInX}
520 if scheduledBy is not None:
521 self.__T = scheduledBy
# Case 1: Direct function with approximated derivatives — fill the default
# finite-difference settings, then build the FDApproximation engine and wrap
# its Direct/Tangent/Adjoint methods as Operator objects.
523 if isinstance(__Function, dict) and \
524 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
525 ("Direct" in __Function) and (__Function["Direct"] is not None):
526 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
527 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
528 if "withdX" not in __Function: __Function["withdX"] = None
529 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
530 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
531 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
532 if "withLengthOfRedundancy" not in __Function: __Function["withLengthOfRedundancy"] = -1
533 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
534 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import to avoid a circular dependency at module load time —
# TODO confirm; NumericObjects is only needed in this branch.
535 from daCore import NumericObjects
536 FDA = NumericObjects.FDApproximation(
538 Function = __Function["Direct"],
539 centeredDF = __Function["CenteredFiniteDifference"],
540 increment = __Function["DifferentialIncrement"],
541 dX = __Function["withdX"],
542 extraArguments = self.__extraArgs,
543 reducingMemoryUse = __Function["withReducingMemoryUse"],
544 avoidingRedundancy = __Function["withAvoidingRedundancy"],
545 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
546 lengthOfRedundancy = __Function["withLengthOfRedundancy"],
547 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
548 mpWorkers = __Function["NumberOfProcesses"],
549 mfEnabled = __Function["withmfEnabled"],
551 self.__FO["Direct"] = Operator(
553 fromMethod = FDA.DirectOperator,
554 reducingMemoryUse = __reduceM,
555 avoidingRedundancy = __avoidRC,
556 inputAsMultiFunction = inputAsMF,
557 extraArguments = self.__extraArgs,
558 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
559 self.__FO["Tangent"] = Operator(
560 name = self.__name+"Tangent",
561 fromMethod = FDA.TangentOperator,
562 reducingMemoryUse = __reduceM,
563 avoidingRedundancy = __avoidRC,
564 inputAsMultiFunction = inputAsMF,
565 extraArguments = self.__extraArgs )
566 self.__FO["Adjoint"] = Operator(
567 name = self.__name+"Adjoint",
568 fromMethod = FDA.AdjointOperator,
569 reducingMemoryUse = __reduceM,
570 avoidingRedundancy = __avoidRC,
571 inputAsMultiFunction = inputAsMF,
572 extraArguments = self.__extraArgs )
# Case 2: all three functions supplied exactly — wrap each as an Operator.
573 elif isinstance(__Function, dict) and \
574 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
575 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
576 self.__FO["Direct"] = Operator(
578 fromMethod = __Function["Direct"],
579 reducingMemoryUse = __reduceM,
580 avoidingRedundancy = __avoidRC,
581 inputAsMultiFunction = inputAsMF,
582 extraArguments = self.__extraArgs,
583 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
584 self.__FO["Tangent"] = Operator(
585 name = self.__name+"Tangent",
586 fromMethod = __Function["Tangent"],
587 reducingMemoryUse = __reduceM,
588 avoidingRedundancy = __avoidRC,
589 inputAsMultiFunction = inputAsMF,
590 extraArguments = self.__extraArgs )
591 self.__FO["Adjoint"] = Operator(
592 name = self.__name+"Adjoint",
593 fromMethod = __Function["Adjoint"],
594 reducingMemoryUse = __reduceM,
595 avoidingRedundancy = __avoidRC,
596 inputAsMultiFunction = inputAsMF,
597 extraArguments = inputAsMF and self.__extraArgs or self.__extraArgs )
# NOTE(review): the line just above should read
# "597 extraArguments = self.__extraArgs )" — see the correction note below.
# Case 3: matrix input — the linear operator is its own Tangent, and the
# Adjoint is its transpose.
598 elif asMatrix is not None:
599 if isinstance(__Matrix, str):
600 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
601 __matrice = numpy.asarray( __Matrix, dtype=float )
602 self.__FO["Direct"] = Operator(
604 fromMatrix = __matrice,
605 reducingMemoryUse = __reduceM,
606 avoidingRedundancy = __avoidRC,
607 inputAsMultiFunction = inputAsMF,
608 enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
609 self.__FO["Tangent"] = Operator(
610 name = self.__name+"Tangent",
611 fromMatrix = __matrice,
612 reducingMemoryUse = __reduceM,
613 avoidingRedundancy = __avoidRC,
614 inputAsMultiFunction = inputAsMF )
615 self.__FO["Adjoint"] = Operator(
616 name = self.__name+"Adjoint",
617 fromMatrix = __matrice.T,
618 reducingMemoryUse = __reduceM,
619 avoidingRedundancy = __avoidRC,
620 inputAsMultiFunction = inputAsMF )
# Fallback error message fragment (its "raise ValueError(" line is absent).
624 "The %s object is improperly defined or undefined,"%self.__name+\
625 " it requires at minima either a matrix, a Direct operator for"+\
626 " approximate derivatives or a Tangent/Adjoint operators pair."+\
627 " Please check your operator input.")
# Store the operating point(s) as column vectors, parsing string vectors.
629 if __appliedInX is not None:
630 self.__FO["AppliedInX"] = {}
631 for key in __appliedInX:
632 if isinstance(__appliedInX[key], str):
633 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
634 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
636 self.__FO["AppliedInX"] = None
# --- __repr__/__str__ fragments (their "def" lines are absent) ---
642 "x.__repr__() <==> repr(x)"
643 return repr(self.__FO)
646 "x.__str__() <==> str(x)"
647 return str(self.__FO)
649 # ==============================================================================
650 class Algorithm(object):
652 Classe générale d'interface de type algorithme
654 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
655 d'assimilation, en fournissant un container (dictionnaire) de variables
656 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
658 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
660 def __init__(self, name):
662 L'initialisation présente permet de fabriquer des variables de stockage
663 disponibles de manière générique dans les algorithmes élémentaires. Ces
664 variables de stockage sont ensuite conservées dans un dictionnaire
665 interne à l'objet, mais auquel on accède par la méthode "get".
667 Les variables prévues sont :
668 - APosterioriCorrelations : matrice de corrélations de la matrice A
669 - APosterioriCovariance : matrice de covariances a posteriori : A
670 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
671 - APosterioriVariances : vecteur des variances de la matrice A
672 - Analysis : vecteur d'analyse : Xa
673 - BMA : Background moins Analysis : Xa - Xb
674 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
675 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
676 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
677 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
678 - CostFunctionJo : partie observations de la fonction-coût : Jo
679 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
680 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
681 - CurrentOptimum : état optimal courant lors d'itérations
682 - CurrentState : état courant lors d'itérations
683 - CurrentStepNumber : pas courant d'avancement dans les algorithmes en évolution, à partir de 0
684 - EnsembleOfSnapshots : ensemble d'états rangés par colonne dans une matrice
685 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
686 - ForecastState : état prédit courant lors d'itérations
687 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
688 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
689 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
690 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
691 - Innovation : l'innovation : d = Y - H(X)
692 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
693 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
694 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
695 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
696 - KalmanGainAtOptimum : gain de Kalman à l'optimum
697 - MahalanobisConsistency : indicateur de consistance des covariances
698 - OMA : Observation moins Analyse : Y - Xa
699 - OMB : Observation moins Background : Y - Xb
700 - Residu : dans le cas des algorithmes de vérification
701 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
702 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
703 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
704 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
705 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
706 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
707 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
708 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
709 On peut rajouter des variables à stocker dans l'initialisation de
710 l'algorithme élémentaire qui va hériter de cette classe
712 logging.debug("%s Initialisation", str(name))
713 self._m = PlatformInfo.SystemUsage()
715 self._name = str( name )
716 self._parameters = {"StoreSupplementaryCalculations":[]}
717 self.__internal_state = {}
718 self.__required_parameters = {}
719 self.__required_inputs = {
720 "RequiredInputValues":{"mandatory":(), "optional":()},
721 "ClassificationTags":[],
723 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
724 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
725 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
726 self.__replace_by_the_new_name = {} # Nouveau nom à partir d'un nom ancien
728 self.StoredVariables = {}
729 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
730 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
731 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
732 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
733 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
734 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
735 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
736 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
737 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
738 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
739 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
740 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
741 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
742 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
743 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
744 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
745 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
746 self.StoredVariables["EnsembleOfSnapshots"] = Persistence.OneMatrix(name = "EnsembleOfSnapshots")
747 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
748 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
749 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
750 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
751 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
752 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
753 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
754 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
755 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
756 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
757 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
758 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
759 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
760 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
761 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
762 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
763 self.StoredVariables["OptimalPoints"] = Persistence.OneVector(name = "OptimalPoints")
764 self.StoredVariables["ReducedBasis"] = Persistence.OneMatrix(name = "ReducedBasis")
765 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
766 self.StoredVariables["Residus"] = Persistence.OneVector(name = "Residus")
767 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
768 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
769 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
770 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
771 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
772 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
773 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
774 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
775 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
777 for k in self.StoredVariables:
778 self.__canonical_stored_name[k.lower()] = k
780 for k, v in self.__variable_names_not_public.items():
781 self.__canonical_parameter_name[k.lower()] = k
782 self.__canonical_parameter_name["algorithm"] = "Algorithm"
783 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
785 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
# Shared pre-processing for an algorithm run: applies Parameters over the
# declared defaults, checks the given vectors/covariances/operators against
# the declared required inputs, normalises bounds and the initialization
# point, then configures optimizer verbosity flags.
# NOTE(review): this listing is lossy (blank lines, "else:"/"try:" lines and
# docstring delimiters are missing); code lines below are kept byte-identical.
787 logging.debug("%s Lancement", self._name)
788 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
# Restart the CPU/elapsed time counters for this run
789 self._getTimeState(reset=True)
791 # Update of the internal parameters with the content of Parameters,
792 # taking back the default values for all those not defined
793 self.__setParameters(Parameters, reset=True)
794 for k, v in self.__variable_names_not_public.items():
795 if k not in self._parameters: self.__setParameters( {k:v} )
797 # Corrections and complements of the vectors
798 def __test_vvalue(argument, variable, argname, symbol=None):
# Local checker: raise if a mandatory vector is unset, otherwise only log.
799 if symbol is None: symbol = variable
801 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
802 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
803 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
804 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
806 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
808 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
809 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
810 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
811 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
# NOTE(review): the opening "logging.debug(" of the call below is missing
# from this listing; the two lines are its string and arguments.
814 "%s %s vector %s is set although neither required nor optional, and its size is %i."%(
815 self._name,argname,symbol,numpy.array(argument).size))
817 __test_vvalue( Xb, "Xb", "Background or initial state" )
818 __test_vvalue( Y, "Y", "Observation" )
819 __test_vvalue( U, "U", "Control" )
821 # Corrections and complements of the covariances
822 def __test_cvalue(argument, variable, argname, symbol=None):
# Local checker: raise if a mandatory covariance matrix is unset, else log.
823 if symbol is None: symbol = variable
825 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
826 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
827 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
828 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
830 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
832 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
833 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
834 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
835 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
837 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
839 __test_cvalue( B, "B", "Background" )
840 __test_cvalue( R, "R", "Observation" )
841 __test_cvalue( Q, "Q", "Evolution" )
843 # Corrections and complements of the operators
844 def __test_ovalue(argument, variable, argname, symbol=None):
# Local checker: an operator is "unset" when None or an empty dict.
845 if symbol is None: symbol = variable
846 if argument is None or (isinstance(argument,dict) and len(argument)==0):
847 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
848 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
849 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
850 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
852 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
854 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
855 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
856 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
857 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
859 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
861 __test_ovalue( HO, "HO", "Observation", "H" )
862 __test_ovalue( EM, "EM", "Evolution", "M" )
863 __test_ovalue( CM, "CM", "Control Model", "C" )
865 # Corrections and complements of the bounds
866 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
867 logging.debug("%s Bounds taken into account"%(self._name,))
# Empty/invalid bounds are normalised to None
869 self._parameters["Bounds"] = None
870 if ("StateBoundsForQuantiles" in self._parameters) \
871 and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) \
872 and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
873 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
874 # Caution: unlike Bounds, no default to None, otherwise one can not be without bounds
876 # Corrections and complements of the initialization in X
877 if "InitializationPoint" in self._parameters:
879 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
880 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
881 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
882 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
883 # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
# Fall back to the background as the initial point when none was forced
885 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
887 if self._parameters["InitializationPoint"] is None:
888 raise ValueError("Forced initial point can not be set without any given Background or required value")
890 # Correction to work around a TNC bug on the return of the Minimum
891 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
892 self.setParameterValue("StoreInternalVariables",True)
894 # Verbosity and logging
895 if logging.getLogger().level < logging.WARNING:
896 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
897 self._parameters["optmessages"] = 15
# Quiet settings when the logger is at WARNING level or above
899 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
900 self._parameters["optmessages"] = 0
904 def _post_run(self,_oH=None):
# Shared post-processing for an algorithm run: derives requested a
# posteriori diagnostics from each stored APosterioriCovariance matrix,
# then logs operator call counts and resource usage.
# NOTE(review): this listing is lossy; the "logging.debug(" openings of the
# two multi-line calls below are missing. Code lines kept byte-identical.
906 if ("StoreSupplementaryCalculations" in self._parameters) and \
907 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
908 for _A in self.StoredVariables["APosterioriCovariance"]:
909 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
910 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
911 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
912 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
913 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Normalise the covariance into a correlation matrix: C = D^-1/2 A D^-1/2
914 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
915 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
916 self.StoredVariables["APosterioriCorrelations"].store( _C )
917 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
919 "%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i",
920 self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
922 "%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i",
923 self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
924 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
925 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
926 logging.debug("%s Terminé", self._name)
def _toStore(self, key):
    "Tell whether *key* was requested through StoreSupplementaryCalculations."
    requested = self._parameters["StoreSupplementaryCalculations"]
    return key in requested
933 def get(self, key=None):
# NOTE(review): the docstring delimiters and the "if key is not None:" /
# "else:" lines are missing from this listing; code kept byte-identical.
935 Returns one of the stored variables identified by the key, or the
936 dictionary of all the available variables when no key is given.
937 The variables are returned directly in object form, so the access
938 methods for an individual object are those of the persistence
939 classes.
# Keys are matched case-insensitively through __canonical_stored_name
942 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
944 return self.StoredVariables
946 def __contains__(self, key=None):
947 "D.__contains__(k) -> True if D has a key k, else False"
# NOTE(review): the False-returning branch of this test is missing from
# this listing; code lines are kept byte-identical.
948 if key is None or key.lower() not in self.__canonical_stored_name:
# Case-insensitive membership test against the stored variables
951 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# NOTE(review): the "def keys(self):" line and the fallback branch are
# missing from this listing; lines below are kept byte-identical.
954 "D.keys() -> list of D's keys"
# Guarded: StoredVariables may not exist yet during early construction
955 if hasattr(self, "StoredVariables"):
956 return self.StoredVariables.keys()
# NOTE(review): the "def pop(self, k, d):" line and the intermediate
# fallback branch are missing from this listing; lines kept byte-identical.
961 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
962 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
# Case-insensitive removal from the stored variables
963 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
# Mimic dict.pop's error when called with no key at all
968 raise TypeError("pop expected at least 1 arguments, got 0")
969 "If key is not found, d is returned if given, otherwise KeyError is raised"
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Must implement the elementary assimilation computation, in its most
    natural mathematical form. Concrete algorithm classes override this
    abstract stub.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
982 def defineRequiredParameter(self,
# Register a required algorithm parameter with its default value and
# validation characteristics (typecast, bounds, allowed value lists,
# descriptive message, deprecated old name).
# NOTE(review): the remaining signature lines (default, typecast, minval,
# maxval, listval, listadv, message, oldname) and the docstring delimiters
# are missing from this listing; code lines are kept byte-identical.
994 Allows the definition, inside the algorithm, of required parameters
995 and their default characteristics.
998 raise ValueError("A name is mandatory to define a required parameter.")
1000 self.__required_parameters[name] = {
1001 "default" : default,
1002 "typecast" : typecast,
1005 "listval" : listval,
1006 "listadv" : listadv,
1007 "message" : message,
1008 "oldname" : oldname,
# Canonical (lower-cased) lookup so parameter names are case-insensitive
1010 self.__canonical_parameter_name[name.lower()] = name
1011 if oldname is not None:
1012 self.__canonical_parameter_name[oldname.lower()] = name # Conversion
1013 self.__replace_by_the_new_name[oldname.lower()] = name
1014 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
1016 def getRequiredParameters(self, noDetails=True):
# NOTE(review): the docstring delimiters and the "if noDetails:"/"else:"
# lines are missing from this listing; code lines kept byte-identical.
1018 Returns the list of the names of the required parameters, or
1019 directly the dictionary of the required parameters.
1022 return sorted(self.__required_parameters.keys())
1024 return self.__required_parameters
1026 def setParameterValue(self, name=None, value=None):
# NOTE(review): this listing is lossy (docstring delimiters, "try:"/
# "except:"/"else:" and some branch lines are missing); code lines below
# are kept byte-identical.
1028 Returns the value of a required parameter in a controlled way
# Resolve the case-insensitive name, then fetch the declared constraints
1030 __k = self.__canonical_parameter_name[name.lower()]
1031 default = self.__required_parameters[__k]["default"]
1032 typecast = self.__required_parameters[__k]["typecast"]
1033 minval = self.__required_parameters[__k]["minval"]
1034 maxval = self.__required_parameters[__k]["maxval"]
1035 listval = self.__required_parameters[__k]["listval"]
1036 listadv = self.__required_parameters[__k]["listadv"]
# Choose the effective value: explicit value, else the declared default,
# applying the typecast when one was declared
1038 if value is None and default is None:
1040 elif value is None and default is not None:
1041 if typecast is None: __val = default
1042 else: __val = typecast( default )
1044 if typecast is None: __val = value
1047 __val = typecast( value )
1049 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
# Range validation (element-wise, so sequences are also supported)
1051 if minval is not None and (numpy.array(__val, float) < minval).any():
1052 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
1053 if maxval is not None and (numpy.array(__val, float) > maxval).any():
1054 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
# Allowed-values validation: listval is the public list, listadv the
# advanced (undocumented) complement
1055 if listval is not None or listadv is not None:
1056 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
1058 if listval is not None and v in listval: continue
1059 elif listadv is not None and v in listadv: continue
1061 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
1062 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
1063 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Declare the calculation input arguments that are required (mandatory)
    or simply accepted (optional) for this algorithm.
    """
    declared = self.__required_inputs["RequiredInputValues"]
    declared["mandatory"] = tuple( mandatory )
    declared["optional"] = tuple( optional )
def getInputArguments(self):
    """
    Return the declared input arguments as a (mandatory, optional) pair.
    """
    declared = self.__required_inputs["RequiredInputValues"]
    return declared["mandatory"], declared["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tag attributes to the algorithm, and return the
    current tag list in every case (including when *tags* is empty).
    """
    current = self.__required_inputs["ClassificationTags"]
    current.extend( tags )
    return current
1088 def __setParameters(self, fromDico={}, reset=False):
# NOTE(review): mutable default argument "fromDico={}" — here it is only
# read (updated from, never mutated), so it is harmless, but worth
# confirming before any refactor. This listing is also lossy ("else:" and
# docstring delimiter lines missing); code lines kept byte-identical.
1090 Allows the storage of the received parameters in the internal dictionary.
1092 self._parameters.update( fromDico )
# Map each canonical parameter name back to the key actually supplied,
# so case-insensitive user keys can be resolved
1093 __inverse_fromDico_keys = {}
1094 for k in fromDico.keys():
1095 if k.lower() in self.__canonical_parameter_name:
1096 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1097 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1098 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
# Warn about deprecated (old) parameter names that were supplied
1100 for k in __inverse_fromDico_keys.values():
1101 if k.lower() in self.__replace_by_the_new_name:
1102 __newk = self.__replace_by_the_new_name[k.lower()]
1103 __msg = "the parameter \"%s\" used in \"%s\" algorithm case is deprecated and has to be replaced by \"%s\"."%(k,self._name,__newk)
1104 __msg += " Please update your code."
1105 warnings.warn(__msg, FutureWarning, stacklevel=50)
# Validate every declared parameter: supplied values go through
# setParameterValue, others fall back to their declared default
1107 for k in self.__required_parameters.keys():
1108 if k in __canonic_fromDico_keys:
1109 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1111 self._parameters[k] = self.setParameterValue(k)
# Log only the length of very long values to keep the log readable
1114 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1115 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1117 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
    """
    Store named variables composing the algorithm internal state, either
    one (key, value) pair at a time or in bulk through *fromDico*.
    """
    if reset:
        # Empty the dictionary beforehand
        self.__internal_state = {}
    if key is not None and value is not None:
        self.__internal_state[key] = value
    self.__internal_state.update( dict(fromDico) )
1129 def _getInternalState(self, key=None):
# NOTE(review): the docstring delimiters and an intermediate line (most
# likely "else:") are missing from this listing; code kept byte-identical.
1131 Returns an internal state, as a dictionary of named variables, or a
# single entry when *key* is given and present.
1133 if key is not None and key in self.__internal_state:
1134 return self.__internal_state[key]
1136 return self.__internal_state
1138 def _getTimeState(self, reset=False):
# NOTE(review): the docstring delimiters and the "if reset:"/"else:" lines
# are missing from this listing; code lines are kept byte-identical.
1140 Initializes or returns the computation time (cpu/elapsed) in seconds
# Reset path: record the reference instants for later measurements
1143 self.__initial_cpu_time = time.process_time()
1144 self.__initial_elapsed_time = time.perf_counter()
# Measurement path: durations since the last reset
1147 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1148 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1149 return self.__cpu_time, self.__elapsed_time
1151 def _StopOnTimeLimit(self, X=None, withReason=False):
1152 "Stop criteria on time limit: True/False [+ Reason]"
# NOTE(review): the final return lines (returning __SC, and __SR when
# withReason is True) are missing from this listing; code lines below are
# kept byte-identical.
1153 c, e = self._getTimeState()
1154 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1155 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1156 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1157 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1159 __SC, __SR = False, ""
1165 # ==============================================================================
1166 class PartialAlgorithm(object):
# NOTE(review): this listing is lossy (blank lines, docstring delimiters
# and some branch lines are missing); code lines are kept byte-identical.
1168 Class to mimic "Algorithm" from the storage point of view, but
1169 without any advanced action such as checking. For the methods taken
1170 over here, the behaviour is identical to those of the "Algorithm"
# class.
1172 def __init__(self, name):
# Minimal storage setup: only the most common stored variables are created
1173 self._name = str( name )
1174 self._parameters = {"StoreSupplementaryCalculations":[]}
1176 self.StoredVariables = {}
1177 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
1178 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
1179 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
1180 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
1181 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
1182 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
# Case-insensitive access map, as in Algorithm
1184 self.__canonical_stored_name = {}
1185 for k in self.StoredVariables:
1186 self.__canonical_stored_name[k.lower()] = k
1188 def _toStore(self, key):
1189 "True if in StoreSupplementaryCalculations, else False"
1190 return key in self._parameters["StoreSupplementaryCalculations"]
1192 def get(self, key=None):
1194 Returns one of the stored variables identified by the key, or the
1195 dictionary of all the available variables when no key is given.
1196 The variables are returned directly in object form, so the access
1197 methods for an individual object are those of the persistence
1198 classes.
1201 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
1203 return self.StoredVariables
1205 # ==============================================================================
1206 class AlgorithmAndParameters(object):
# NOTE(review): this listing is lossy (blank lines, docstring delimiters,
# "else:"/"try:"/"return" lines and some signature lines are missing);
# code lines below are kept byte-identical.
1208 General action interface class for the algorithm and its parameters
1211 name = "GenericAlgorithm",
# Constructor tail: records the name, prepares the algorithm slots, then
# resolves the algorithm choice from the arguments or from a script
1218 self.__name = str(name)
1222 self.__algorithm = {}
1223 self.__algorithmFile = None
1224 self.__algorithmName = None
1226 self.updateParameters( asDict, asScript )
1228 if asAlgorithm is None and asScript is not None:
1229 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1231 __Algo = asAlgorithm
1233 if __Algo is not None:
1234 self.__A = str(__Algo)
1235 self.__P.update( {"Algorithm":self.__A} )
1237 self.__setAlgorithm( self.__A )
1239 self.__variable_names_not_public = {"nextStep":False} # Duplication in Algorithm
1241 def updateParameters(self,
1245 "Update of the parameters"
1246 if asDict is None and asScript is not None:
1247 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1251 if __Dict is not None:
1252 self.__P.update( dict(__Dict) )
1254 def executePythonScheme(self, asDictAO = None):
1255 "Allows the launch of the assimilation calculation"
1256 Operator.CM.clearCache()
1258 if not isinstance(asDictAO, dict):
1259 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Unwrap each input through getO() when it is a wrapped case object
1260 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1261 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1262 else: self.__Xb = None
1263 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1264 else: self.__Y = asDictAO["Observation"]
1265 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1266 else: self.__U = asDictAO["ControlInput"]
1267 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1268 else: self.__HO = asDictAO["ObservationOperator"]
1269 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1270 else: self.__EM = asDictAO["EvolutionModel"]
1271 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1272 else: self.__CM = asDictAO["ControlModel"]
1273 self.__B = asDictAO["BackgroundError"]
1274 self.__R = asDictAO["ObservationError"]
1275 self.__Q = asDictAO["EvolutionError"]
# Check size/shape consistency before running the algorithm
1277 self.__shape_validate()
1279 self.__algorithm.run(
1289 Parameters = self.__P,
1293 def executeYACSScheme(self, FileName=None):
1294 "Allows the launch of the assimilation calculation"
1295 if FileName is None or not os.path.exists(FileName):
1296 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1298 __file = os.path.abspath(FileName)
1299 logging.debug("The YACS file name is \"%s\"."%__file)
# The SALOME/YACS/ADAO environment must be loaded for this execution mode
1300 if not PlatformInfo.has_salome or \
1301 not PlatformInfo.has_yacs or \
1302 not PlatformInfo.has_adao:
1303 raise ImportError("\n\n"+\
1304 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1305 "Please load the right environnement before trying to use it.\n")
1308 import SALOMERuntime
1310 SALOMERuntime.RuntimeSALOME_setRuntime()
1312 r = pilot.getRuntime()
1313 xmlLoader = loader.YACSLoader()
1314 xmlLoader.registerProcCataLoader()
1316 catalogAd = r.loadCatalog("proc", __file)
1317 r.addCatalog(catalogAd)
# Load, validate and execute the YACS XML scheme
1322 p = xmlLoader.load(__file)
1323 except IOError as ex:
1324 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1326 logger = p.getLogger("parser")
1327 if not logger.isEmpty():
1328 print("The imported YACS XML schema has errors on parsing:")
1329 print(logger.getStr())
1332 print("The YACS XML schema is not valid and will not be executed:")
1333 print(p.getErrorReport())
1335 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1336 p.checkConsistency(info)
1337 if info.areWarningsOrErrors():
1338 print("The YACS XML schema is not coherent and will not be executed:")
1339 print(info.getGlobalRepr())
1341 e = pilot.ExecutorSwig()
1343 if p.getEffectiveState() != pilot.DONE:
1344 print(p.getErrorReport())
1348 def get(self, key = None):
1349 "Checks the existence of a variable or parameter key"
# Stored algorithm variables take precedence over raw parameters
1350 if key in self.__algorithm:
1351 return self.__algorithm.get( key )
1352 elif key in self.__P:
1353 return self.__P[key]
# No key given: return all parameters, minus the non-public ones
1355 allvariables = self.__P
1356 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1359 def pop(self, k, d):
1360 "Necessary for the pickling"
1361 return self.__algorithm.pop(k, d)
1363 def getAlgorithmRequiredParameters(self, noDetails=True):
1364 "Returns the list of the required parameters according to the algorithm"
1365 return self.__algorithm.getRequiredParameters(noDetails)
1367 def getAlgorithmInputArguments(self):
1368 "Returns the list of the required inputs according to the algorithm"
1369 return self.__algorithm.getInputArguments()
1371 def getAlgorithmAttributes(self):
1372 "Returns the list of the attributes according to the algorithm"
1373 return self.__algorithm.setAttributes()
1375 def setObserver(self, __V, __O, __I, __S):
# Attach a data observer __O (with parameters __I, scheduler __S) to the
# stored variable named __V; requires a chosen algorithm
1376 if self.__algorithm is None \
1377 or isinstance(self.__algorithm, dict) \
1378 or not hasattr(self.__algorithm,"StoredVariables"):
1379 raise ValueError("No observer can be build before choosing an algorithm.")
1380 if __V not in self.__algorithm:
1381 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1383 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1386 HookParameters = __I,
1389 def removeObserver(self, __V, __O, __A = False):
# Detach the data observer __O from the stored variable named __V
1390 if self.__algorithm is None \
1391 or isinstance(self.__algorithm, dict) \
1392 or not hasattr(self.__algorithm,"StoredVariables"):
1393 raise ValueError("No observer can be removed before choosing an algorithm.")
1394 if __V not in self.__algorithm:
1395 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1397 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1402 def hasObserver(self, __V):
# Tell whether the stored variable named __V currently has an observer
1403 if self.__algorithm is None \
1404 or isinstance(self.__algorithm, dict) \
1405 or not hasattr(self.__algorithm,"StoredVariables"):
1407 if __V not in self.__algorithm:
1409 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# Combined key list (algorithm variables + parameters) minus non-public ones
1412 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1413 for k in self.__variable_names_not_public:
1414 if k in __allvariables: __allvariables.remove(k)
1415 return __allvariables
1417 def __contains__(self, key=None):
1418 "D.__contains__(k) -> True if D has a key k, else False"
1419 return key in self.__algorithm or key in self.__P
1421 "x.__repr__() <==> repr(x)"
1423 return repr(self.__A)+", "+repr(self.__P)
1426 "x.__str__() <==> str(x)"
1427 return str(self.__A)+", "+str(self.__P)
1429 def __setAlgorithm(self, choice = None ):
1431 Allows the selection of the algorithm to be used to carry out the
1432 assimilation study. The argument is a character field referring to
1433 the name of an algorithm performing the operation on the fixed
# arguments.
1436 raise ValueError("Error: algorithm choice has to be given")
1437 if self.__algorithmName is not None:
1438 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1439 daDirectory = "daAlgorithms"
1441 # Explicitly search for the complete file
1442 # ------------------------------------------
1444 for directory in sys.path:
1445 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1446 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1447 if module_path is None:
1449 "No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1451 # Import the complete file as a module
1452 # ------------------------------------------
# Temporarily prepend the algorithm directory to sys.path for the import
1454 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1455 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1456 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1457 raise ImportError("this module does not define a valid elementary algorithm.")
1458 self.__algorithmName = str(choice)
1459 sys.path = sys_path_tmp ; del sys_path_tmp
1460 except ImportError as e:
1462 "The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1464 # Instanciate an object of the elementary type of the file
1465 # -------------------------------------------------
1466 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1469 def __shape_validate(self):
1471 Validation of the correct correspondence of the sizes of the
1472 variables and of the matrices, if there are any.
# Derive a shape tuple for each input, accepting objects exposing either
# a "size", a tuple "shape" attribute, or a callable "shape()"
1474 if self.__Xb is None: __Xb_shape = (0,)
1475 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1476 elif hasattr(self.__Xb,"shape"):
1477 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1478 else: __Xb_shape = self.__Xb.shape()
1479 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1481 if self.__Y is None: __Y_shape = (0,)
1482 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1483 elif hasattr(self.__Y,"shape"):
1484 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1485 else: __Y_shape = self.__Y.shape()
1486 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1488 if self.__U is None: __U_shape = (0,)
1489 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1490 elif hasattr(self.__U,"shape"):
1491 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1492 else: __U_shape = self.__U.shape()
1493 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1495 if self.__B is None: __B_shape = (0,0)
1496 elif hasattr(self.__B,"shape"):
1497 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1498 else: __B_shape = self.__B.shape()
1499 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1501 if self.__R is None: __R_shape = (0,0)
1502 elif hasattr(self.__R,"shape"):
1503 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1504 else: __R_shape = self.__R.shape()
1505 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1507 if self.__Q is None: __Q_shape = (0,0)
1508 elif hasattr(self.__Q,"shape"):
1509 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1510 else: __Q_shape = self.__Q.shape()
1511 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1513 if len(self.__HO) == 0: __HO_shape = (0,0)
1514 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1515 elif hasattr(self.__HO["Direct"],"shape"):
1516 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1517 else: __HO_shape = self.__HO["Direct"].shape()
1518 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1520 if len(self.__EM) == 0: __EM_shape = (0,0)
1521 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1522 elif hasattr(self.__EM["Direct"],"shape"):
1523 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1524 else: __EM_shape = self.__EM["Direct"].shape()
1525 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1527 if len(self.__CM) == 0: __CM_shape = (0,0)
1528 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1529 elif hasattr(self.__CM["Direct"],"shape"):
1530 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1531 else: __CM_shape = self.__CM["Direct"].shape()
1532 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1534 # Checking the conditions
1535 # ---------------------------
1536 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1537 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1538 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1539 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariance matrices and the evolution operator must be square
1541 if not( min(__B_shape) == max(__B_shape) ):
1542 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1543 if not( min(__R_shape) == max(__R_shape) ):
1544 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1545 if not( min(__Q_shape) == max(__Q_shape) ):
1546 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1547 if not( min(__EM_shape) == max(__EM_shape) ):
1548 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross-compatibility between the operators, vectors and covariances
1550 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1552 "Shape characteristic of observation operator (H)"+\
1553 " \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1554 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1556 "Shape characteristic of observation operator (H)"+\
1557 " \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1558 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1560 "Shape characteristic of observation operator (H)"+\
1561 " \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1562 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1564 "Shape characteristic of observation operator (H)"+\
1565 " \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: for EnsembleBlue, a multi-column background is reshaped
# into a persistent vector of ensemble members instead of failing
1567 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1568 if self.__algorithmName in ["EnsembleBlue",]:
1569 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1570 self.__Xb = Persistence.OneVector("Background")
1571 for member in asPersistentVector:
1572 self.__Xb.store( numpy.asarray(member, dtype=float) )
1573 __Xb_shape = min(__B_shape)
1576 "Shape characteristic of a priori errors covariance matrix (B)"+\
1577 " \"%s\" and background vector (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1579 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1581 "Shape characteristic of observation errors covariance matrix (R)"+\
1582 " \"%s\" and observation vector (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1584 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1586 "Shape characteristic of evolution model (EM)"+\
1587 " \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1589 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1591 "Shape characteristic of control model (CM)"+\
1592 " \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Bounds, when given as sequences, must provide one pair per state component
1594 if ("Bounds" in self.__P) \
1595 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1596 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1597 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1598 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1600 if ("StateBoundsForQuantiles" in self.__P) \
1601 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1602 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1603 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1604 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
# ==============================================================================
class RegulationAndParameters(object):
    """
    Generic action interface for regulation and its parameters.

    Parameters can be given directly (``asAlgorithm``/``asDict``) or read
    from a script file (``asScript``) — TODO confirm script layout against
    Interfaces.ImportFromScript.
    """

    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        self.__name = str(name)
        self.__P    = {}
        #
        # The algorithm name comes either from the script or from the argument
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        # The parameter dictionary comes either from the script or from the argument
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return the value for a known key, or the whole parameter dict otherwise"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
# ==============================================================================
class DataObserver(object):
    """
    Generic observer interface: attaches an observer callable to one or
    several algorithm variables of ``withAlgo``.
    """

    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        self.__name = str(name)
        self.__V    = None  # tuple of watched variable names
        self.__O    = None  # observer callable
        self.__I    = None  # tuple of info strings, parallel to self.__V
        #
        # Normalize the watched variable name(s) and their info strings
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # The observer is either given ready-made, or built from a user script
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        # Register the observer on every watched variable of the algorithm
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
# ==============================================================================
class UserScript(object):
    """
    Generic interface holding user script text, obtained from (in priority
    order) a raw string, a named template, or an external script file.
    Defaults to the empty string when nothing is given.
    """

    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        self.__name = str(name)
        self.__F    = None  # script text
        if asString is not None:
            self.__F = asString
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
# ==============================================================================
class ExternalParameters(object):
    """
    Generic interface holding a named dictionary of external parameters,
    filled from a dict or from a script file.
    """

    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Update the internal parameter dictionary"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value for a known key, or the list of keys otherwise"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self.__P.keys())

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        return self.__P.pop(k, d)

    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
# ==============================================================================
class State(object):
    """
    Generic state interface: stores either a single vector or a series of
    vectors (a Persistence object).
    """

    def __init__(self,
                 name               = "GenericVector",
                 asVector           = None,
                 asPersistentVector = None,
                 asScript           = None,
                 asDataFile         = None,
                 colNames           = None,
                 colMajor           = False,
                 scheduledBy        = None,
                 toBeChecked        = False,
                ):
        """
        Define a vector:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" for script input.
        - asPersistentVector : data input, as a series of vectors compatible
          with the numpy.matrix constructor, as a Persistence object, or
          "True" for script input.
        - asScript : if a valid script is given containing a variable named
          "name", the variable is of kind "asVector" (default) or
          "asPersistentVector" depending on which of these flags is set.
        - asDataFile : if one or more valid files are given containing
          values in columns, themselves named "colNames" (if no column name
          is given, a column named "name" is looked up), the columns are
          retrieved and stored row by row (colMajor=False, default) or
          column by column (colMajor=True). The resulting variable is of
          kind "asVector" (default) or "asPersistentVector" depending on
          which of these flags is set.
        """
        self.__name       = str(name)
        self.__check      = bool(toBeChecked)
        #
        self.__V          = None
        self.__T          = None  # optional scheduler
        self.__is_vector  = False
        self.__is_series  = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz content and colMajor orientation both trigger a transpose
                if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            else:
                if colNames is not None:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
                else:
                    __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                if bool(colMajor):
                    __Vector = numpy.ravel(__Vector, order = "F")
                else:
                    __Vector = numpy.ravel(__Vector, order = "C")
        else:
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            # Stored as a float column vector
            self.__V         = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
            self.shape       = self.__V.shape
            self.size        = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name)
                if isinstance(__Series, str):
                    __Series = PlatformInfo.strmatrix2liststr(__Series)
                for member in __Series:
                    if isinstance(member, str):
                        member = PlatformInfo.strvect2liststr( member )
                    self.__V.store(numpy.asarray( member, dtype=float ))
            else:
                # Already a persistent-like object
                self.__V = __Series
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape       = self.__V.shape
            else:
                self.shape       = self.__V.shape()
            if len(self.shape) == 1:
                self.shape       = (self.shape[0],1)
            self.size        = self.shape[0] * self.shape[1]
        else:
            raise ValueError(
                "The %s object is improperly defined or undefined,"%self.__name+\
                " it requires at minima either a vector, a list/tuple of"+\
                " vectors or a persistent object. Please check your vector input.")
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        "Return the stored object, optionally with its scheduler"
        if withScheduler:
            return self.__V, self.__T
        elif self.__T is None:
            return self.__V
        else:
            return self.__V

    def isvector(self):
        "Check the internal storage type"
        return self.__is_vector

    def isseries(self):
        "Check the internal storage type"
        return self.__is_series

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)
# ==============================================================================
class Covariance(object):
    """
    Generic covariance interface. The covariance is stored either as a full
    matrix, as a variance vector (the diagonal), as a single variance scalar
    (an implicit scaled identity), or as a user object implementing the
    required matrix protocol.
    """

    def __init__(self,
                 name          = "GenericCovariance",
                 asCovariance  = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 asCovObject   = None,
                 asScript      = None,
                 toBeChecked   = False,
                ):
        """
        Define a covariance:
        - asCovariance : data input as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar, multiplying
          an implicit identity correlation matrix (no explicit matrix given)
        - asEyeByVector : data input as a single variance vector, put on the
          diagonal of a correlation matrix (no explicit matrix given)
        - asCovObject : data input as a python object providing the mandatory
          methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
          "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__",
          "__str__"
        - toBeChecked : boolean forcing the SPD check of the full matrix
        """
        self.__name       = str(name)
        self.__check      = bool(toBeChecked)
        #
        self.__C          = None
        self.__is_scalar  = False
        self.__is_vector  = False
        self.__is_matrix  = False
        self.__is_object  = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            if asEyeByScalar:
                __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asEyeByVector:
                __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            elif asCovObject:
                __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
            else:
                __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
        else:
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        if __Scalar is not None:
            if isinstance(__Scalar, str):
                __Scalar = PlatformInfo.strvect2liststr( __Scalar )
                if len(__Scalar) > 0: __Scalar = __Scalar[0]
            if numpy.array(__Scalar).size != 1:
                raise ValueError(
                    " The diagonal multiplier given to define a sparse matrix is"+\
                    " not a unique scalar value.\n Its actual measured size is"+\
                    " %i. Please check your scalar input."%numpy.array(__Scalar).size)
            self.__is_scalar = True
            self.__C         = numpy.abs( float(__Scalar) )
            # Scalar storage: no explicit shape (set to (0,0) by convention)
            self.shape       = (0,0)
            self.size        = 0
        elif __Vector is not None:
            if isinstance(__Vector, str):
                __Vector = PlatformInfo.strvect2liststr( __Vector )
            self.__is_vector = True
            self.__C         = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
            self.shape       = (self.__C.size,self.__C.size)
            self.size        = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C         = numpy.matrix( __Matrix, float )
            self.shape       = self.__C.shape
            self.size        = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            self.__C         = __Object
            # The user object must expose the minimal matrix protocol
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape       = self.__C.shape
            else:
                self.shape       = (0,0)
            if hasattr(self.__C,"size"):
                self.size        = self.__C.size
            else:
                self.size        = 0
        else:
            pass
        #
        self.__validate()

    def __validate(self):
        "Validate the stored covariance content"
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # Full SPD check only when requested or when logging verbosely
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            try:
                numpy.linalg.cholesky( self.__C )
            except Exception:
                raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            try:
                self.__C.cholesky()
            except Exception:
                raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

    def isscalar(self):
        "Check the internal storage type"
        return self.__is_scalar

    def isvector(self):
        "Check the internal storage type"
        return self.__is_vector

    def ismatrix(self):
        "Check the internal storage type"
        return self.__is_matrix

    def isobject(self):
        "Check the internal storage type"
        return self.__is_object

    def getI(self):
        "Inverse of the covariance"
        if self.ismatrix():
            return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject() and hasattr(self.__C,"getI"):
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        else:
            return None # Indispensable

    def getT(self):
        "Transpose of the covariance"
        if self.ismatrix():
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject() and hasattr(self.__C,"getT"):
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
        else:
            raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))

    def cholesky(self):
        "Cholesky decomposition"
        if self.ismatrix():
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
        else:
            raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        if self.ismatrix():
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
        else:
            raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))

    def sqrtm(self):
        "Matrix square root"
        if self.ismatrix():
            import scipy
            return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtm"):
            return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
        else:
            raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))

    def sqrtmI(self):
        "Inverse of the matrix square root"
        if self.ismatrix():
            import scipy
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"sqrtmI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
        else:
            raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))

    def diag(self, msize=None):
        "Diagonal of the matrix"
        if self.ismatrix():
            return numpy.diag(self.__C)
        elif self.isvector():
            return self.__C
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return self.__C * numpy.ones(int(msize))
        elif self.isobject() and hasattr(self.__C,"diag"):
            return self.__C.diag()
        else:
            raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))

    def trace(self, msize=None):
        "Trace of the matrix"
        if self.ismatrix():
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        else:
            raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))

    def asfullmatrix(self, msize=None):
        "Full (dense) matrix equivalent"
        if self.ismatrix():
            return numpy.asarray(self.__C, dtype=float)
        elif self.isvector():
            return numpy.asarray( numpy.diag(self.__C), dtype=float )
        elif self.isscalar():
            if msize is None:
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            else:
                return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
        else:
            raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))

    def assparsematrix(self):
        "Sparse (raw) stored values"
        return self.__C

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__C)

    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            if len(_A.shape) == 1:
                _A.reshape((-1,1))[::2] += self.__C
            else:
                # Add the variances on the diagonal of the 2-D operand
                _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)

    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))

    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)

    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))

    def __neg__(self):
        "x.__neg__() <==> -x"
        return - self.__C

    def __matmul__(self, other):
        "x.__mul__(y) <==> x@y"
        if self.ismatrix() and isinstance(other, (int, float)):
            return numpy.asarray(self.__C) * other
        elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C @ numpy.ravel(other))
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asarray(self.__C) @ numpy.asarray(other)
            else:
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.ravel(self.__C) * numpy.ravel(other)
            elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
                return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
            else:
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return numpy.asarray(self.__C * other)
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.ravel(other)
            else:
                return self.__C * numpy.asarray(other)
        elif self.isobject():
            return self.__C.__matmul__(other)
        else:
            raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            else:
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmatmul__(self, other):
        "x.__rmul__(y) <==> y@x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmatmul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
            return other * self.__C
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(numpy.ravel(other)) * self.__C
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix(other) * self.__C
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            else:
                raise ValueError(
                    "operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isscalar() and isinstance(other,float):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        else:
            raise NotImplementedError(
                "%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))

    def __len__(self):
        "x.__len__() <==> len(x)"
        return self.shape[0]
# ==============================================================================
class Observer2Func(object):
    """
    Build an observer function object from its source text: the text is
    executed each time the observer is triggered.
    """

    def __init__(self, corps=""):
        self.__corps = corps

    def func(self,var,info):
        "Observation function: runs the stored source text"
        exec(self.__corps)

    def getfunc(self):
        "Return the function pointer held by the object"
        return self.func
# ==============================================================================
class CaseLogger(object):
    """
    Record the creation commands of a case, and restore (dump) or load them
    through pluggable viewers/loaders.
    """

    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []
        self.__switchoff = False
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
        # A switched-off command suspends recording until a normal one occurs
        if __switchoff:
            self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
# ==============================================================================
def MultiFonction(
        __xserie,
        _extraArguments = None,
        _sFunction      = lambda x: x,
        _mpEnabled      = False,
        _mpWorkers      = None,
        ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the given function, evaluated either sequentially or with a
    multiprocessing pool when enabled and available.
    """
    # Initial checks and definitions
    if not PlatformInfo.isIterable( __xserie ):
        raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    if _mpEnabled:
        if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
            __mpWorkers = None
        else:
            __mpWorkers = int(_mpWorkers)
        try:
            import multiprocessing
            __mpEnabled = True
        except ImportError:
            __mpEnabled = False
    else:
        __mpEnabled = False
        __mpWorkers = None
    #
    # Effective calculations
    if __mpEnabled:
        _jobs = __xserie
        with multiprocessing.Pool(__mpWorkers) as pool:
            __multiHX = pool.map( _sFunction, _jobs )
            pool.close()
            pool.join()
    else:
        __multiHX = []
        if _extraArguments is None:
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue ) )
        elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
        elif _extraArguments is not None and isinstance(_extraArguments, dict):
            for __xvalue in __xserie:
                __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
        else:
            raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
    #
    return __multiHX
# ==============================================================================
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC\n')