1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# NOTE(review): this chunk is an elided extract — the "def __init__" header,
# the docstring quotes and several statements are missing from this view, so
# only comments are added; every visible code line is left byte-identical.
# Constructor fragment: tolerance used to declare two points redundant, and
# maximum cache length (-1 means "auto-size on first store", see storeValueInX).
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
# __tolerBP : relative tolerance for the redundancy test.
# __lenghtOR : maximum number of cached (point, value) entries.
# __initlnOR : remembered initial length, restored by the enable() fragment below.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Checks whether a computation matching the given value already exists in the cache"
# Scan the cache backwards (most recent entries first), at most __lenghtOR deep.
# Entry layout is (point, value, norm, operator name) — see storeValueInX below.
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Cheap rejection tests first: argument without a .size attribute, different
# operator name, different size, then a first-component pre-test before the
# full (and more expensive) norm comparison. The branch bodies (continue/skip
# statements) are elided from this view.
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Full test: the cached point matches xValue when their distance is below the
# tolerance scaled by the cached point's norm.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stores, for an operator o, a computation Hx matching the value x"
# First store with auto-sizing requested (-1): pick a cache length
# proportional to the state size, capped at 50 components.
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if the operator name is new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict the oldest entries (FIFO) once the cache exceeds its maximum length.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable()/enable() fragments (method headers elided): disable() presumably
# shrinks the effective cache and clears the enabled flag, enable() restores
# the initial length and re-sets the flag — TODO confirm against full source.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
116 Classe générale d'interface de type opérateur simple
# NOTE(review): elided extract — the "def __init__" header, several parameters
# (fromMethod, fromMatrix are referenced below but their declaration lines are
# missing) and various branch bodies are not visible; only comments are added.
# Constructor fragment: keyword defaults for the operator wrapper.
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 reducingMemoryUse = False,
129 inputAsMultiFunction = False,
130 enableMultiProcess = False,
131 extraArguments = None,
134 On construit un objet de ce type en fournissant, à l'aide de l'un des
135 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
138 - name : nom d'opérateur
139 - fromMethod : argument de type fonction Python
140 - fromMatrix : argument adapté au constructeur numpy.array/matrix
141 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
142 - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
144 - inputAsMultiFunction : booléen indiquant une fonction explicitement
145 définie (ou pas) en multi-fonction
146 - extraArguments : arguments supplémentaires passés à la fonction de
147 base et ses dérivées (tuple ou dictionnaire)
# Store the configuration flags; call counters start at zero.
149 self.__name = str(name)
150 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
151 self.__reduceM = bool( reducingMemoryUse )
152 self.__avoidRC = bool( avoidingRedundancy )
153 self.__inputAsMF = bool( inputAsMultiFunction )
154 self.__mpEnabled = bool( enableMultiProcess )
155 self.__extraArgs = extraArguments
# Three construction modes: a native multi-function, a mono-function wrapped
# into a multi-function via functools.partial, or an explicit matrix.
156 if fromMethod is not None and self.__inputAsMF:
157 self.__Method = fromMethod # logtimer(fromMethod)
159 self.__Type = "Method"
160 elif fromMethod is not None and not self.__inputAsMF:
161 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
163 self.__Type = "Method"
164 elif fromMatrix is not None:
# A string matrix specification is first converted to a list form.
166 if isinstance(fromMatrix, str):
167 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
168 self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
169 self.__Type = "Matrix"
# Toggle the shared class-level cache (Operator.CM, a CacheManager).
175 def disableAvoidingRedundancy(self):
177 Operator.CM.disable()
179 def enableAvoidingRedundancy(self):
# NOTE(review): only the disable() branch is visible here; presumably this is
# the else-branch taken when redundancy avoidance is off for this operator —
# the enabling branch is elided, confirm against the full source.
184 Operator.CM.disable()
190 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
192 Permet de restituer le résultat de l'application de l'opérateur à une
193 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
194 argument devant a priori être du bon type.
196 - les arguments par série sont :
197 - xValue : argument adapté pour appliquer l'opérateur
198 - HValue : valeur précalculée de l'opérateur en ce point
199 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# If precomputed values are supplied, reuse them instead of evaluating.
206 if HValue is not None:
210 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
212 if _HValue is not None:
213 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
215 for i in range(len(_HValue)):
216 _HxValue.append( _HValue[i] )
# Also feed the cache with the (x, Hx) pairs just reused.
218 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise: per-argument path — try the cache first, apply the matrix
# directly if available, and defer function evaluations into a batch (_xserie).
223 for i, xv in enumerate(_xValue):
225 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
227 __alreadyCalculated = False
229 if __alreadyCalculated:
230 self.__addOneCacheCall()
233 if self.__Matrix is not None:
234 self.__addOneMatrixCall()
235 _hv = self.__Matrix @ numpy.ravel(xv)
237 self.__addOneMethodCall()
241 _HxValue.append( _hv )
# Batched evaluation of all deferred points through the multi-function.
243 if len(_xserie)>0 and self.__Matrix is None:
244 if self.__extraArgs is None:
245 _hserie = self.__Method( _xserie ) # Multi-function computation
247 _hserie = self.__Method( _xserie, self.__extraArgs ) # Multi-function computation
# A multi-function must return a sequence; guard against mono-function misuse.
248 if not hasattr(_hserie, "pop"):
249 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
255 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series of results as columns of a 2D array.
257 if returnSerieAsArrayMatrix:
258 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
260 if argsAsSerie: return _HxValue
261 else: return _HxValue[-1]
263 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
265 Permet de restituer le résultat de l'application de l'opérateur à des
266 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
267 argument devant a priori être du bon type. Si la uValue est None,
268 on suppose que l'opérateur ne s'applique qu'à xValue.
270 - paires : les arguments par paire sont :
271 - xValue : argument X adapté pour appliquer l'opérateur
272 - uValue : argument U adapté pour appliquer l'opérateur
273 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
# Normalize to a series of (x, u) pairs.
275 if argsAsSerie: _xuValue = paires
276 else: _xuValue = (paires,)
277 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix path: the control part u is ignored, H is applied to x only.
279 if self.__Matrix is not None:
281 for paire in _xuValue:
282 _xValue, _uValue = paire
283 self.__addOneMatrixCall()
284 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function path: pass the full pair when u is given, x alone otherwise.
287 for paire in _xuValue:
288 _xValue, _uValue = paire
289 if _uValue is not None:
290 _xuArgs.append( paire )
292 _xuArgs.append( _xValue )
293 self.__addOneMethodCall( len(_xuArgs) )
294 if self.__extraArgs is None:
295 _HxValue = self.__Method( _xuArgs ) # Multi-function computation
297 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # Multi-function computation
299 if returnSerieAsArrayMatrix:
300 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
302 if argsAsSerie: return _HxValue
303 else: return _HxValue[-1]
305 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
307 Permet de restituer le résultat de l'application de l'opérateur à une
308 série d'arguments xValue, sachant que l'opérateur est valable en
309 xNominal. Cette méthode se contente d'appliquer, son argument devant a
310 priori être du bon type. Si l'opérateur est linéaire car c'est une
311 matrice, alors il est valable en tout point nominal et xNominal peut
312 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
313 permet d'indiquer que l'argument est multi-paires.
315 - paires : les arguments par paire sont :
316 - xNominal : série d'arguments permettant de donner le point où
317 l'opérateur est construit pour être ensuite appliqué
318 - xValue : série d'arguments adaptés pour appliquer l'opérateur
319 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
321 if argsAsSerie: _nxValue = paires
322 else: _nxValue = (paires,)
323 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear operator (matrix): valid at any nominal point, xNominal is unused.
325 if self.__Matrix is not None:
327 for paire in _nxValue:
328 _xNominal, _xValue = paire
329 self.__addOneMatrixCall()
330 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function path: the method receives the (xNominal, xValue) pairs directly.
332 self.__addOneMethodCall( len(_nxValue) )
333 if self.__extraArgs is None:
334 _HxValue = self.__Method( _nxValue ) # Multi-function computation
336 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # Multi-function computation
338 if returnSerieAsArrayMatrix:
339 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
341 if argsAsSerie: return _HxValue
342 else: return _HxValue[-1]
344 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
346 Permet de renvoyer l'opérateur sous la forme d'une matrice
# Already a matrix: return it as-is (wrapped in a one-element series).
348 if self.__Matrix is not None:
349 self.__addOneMatrixCall()
350 mValue = [self.__Matrix,]
# Otherwise an operating point is required; the sentinel string flags "no
# point given" so that None remains a legal user value.
351 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
354 self.__addOneMethodCall( len(ValueForMethodForm) )
355 for _vfmf in ValueForMethodForm:
356 mValue.append( self.__Method(((_vfmf, None),)) )
358 self.__addOneMethodCall()
359 mValue = self.__Method(((ValueForMethodForm, None),))
361 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
363 if argsAsSerie: return mValue
364 else: return mValue[-1]
# shape accessor (method header elided): only meaningful in matrix form.
368 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
369 la forme d'une matrice
371 if self.__Matrix is not None:
372 return self.__Matrix.shape
374 raise ValueError("Matrix form of the operator is not available, nor the shape")
376 def nbcalls(self, which=None):
378 Renvoie les nombres d'évaluations de l'opérateur
# Tuple fragment: per-instance counts first, then the class-wide totals
# shared by all Operator instances.
381 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
382 self.__NbCallsAsMatrix,
383 self.__NbCallsAsMethod,
384 self.__NbCallsOfCached,
385 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
386 Operator.NbCallsAsMatrix,
387 Operator.NbCallsAsMethod,
388 Operator.NbCallsOfCached,
390 if which is None: return __nbcalls
391 else: return __nbcalls[which]
393 def __addOneMatrixCall(self):
394 "Counts one call"
395 self.__NbCallsAsMatrix += 1 # Local count
396 Operator.NbCallsAsMatrix += 1 # Global count
398 def __addOneMethodCall(self, nb = 1):
399 "Counts one call"
400 self.__NbCallsAsMethod += nb # Local count
401 Operator.NbCallsAsMethod += nb # Global count
403 def __addOneCacheCall(self):
404 "Counts one call"
405 self.__NbCallsOfCached += 1 # Local count
406 Operator.NbCallsOfCached += 1 # Global count
408 # ==============================================================================
409 class FullOperator(object):
411 Classe générale d'interface de type opérateur complet
412 (Direct, Linéaire Tangent, Adjoint)
# NOTE(review): elided extract — the "def __init__" header and several
# parameters (asMatrix, appliedInX, toBeChecked, scheduledBy are referenced
# below but their declaration lines are missing); only comments are added.
# Constructor fragment: the operator can be given as one function, three
# functions, a script, or a matrix, plus a parameter dictionary.
415 name = "GenericFullOperator",
417 asOneFunction = None, # 1 Fonction
418 asThreeFunctions = None, # 3 Fonctions in a dictionary
419 asScript = None, # 1 or 3 Fonction(s) by script
420 asDict = None, # Parameters
422 extraArguments = None,
423 performancePrf = None,
424 inputAsMF = False,# Fonction(s) as Multi-Functions
429 self.__name = str(name)
430 self.__check = bool(toBeChecked)
431 self.__extraArgs = extraArguments
# Normalize the multiprocessing-related parameters: derivatives and plain
# evaluation are mutually exclusive, with priority to the derivatives.
436 if (asDict is not None) and isinstance(asDict, dict):
437 __Parameters.update( asDict )
438 # Priority given to EnableMultiProcessingInDerivatives=True
439 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
440 __Parameters["EnableMultiProcessingInDerivatives"] = True
441 __Parameters["EnableMultiProcessingInEvaluation"] = False
442 if "EnableMultiProcessingInDerivatives" not in __Parameters:
443 __Parameters["EnableMultiProcessingInDerivatives"] = False
444 if __Parameters["EnableMultiProcessingInDerivatives"]:
445 __Parameters["EnableMultiProcessingInEvaluation"] = False
446 if "EnableMultiProcessingInEvaluation" not in __Parameters:
447 __Parameters["EnableMultiProcessingInEvaluation"] = False
448 if "withIncrement" in __Parameters: # Temporary (legacy parameter name)
449 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
450 # The default is equivalent to "ReducedOverallRequirements"
451 __reduceM, __avoidRC = True, True
452 if performancePrf is not None:
453 if performancePrf == "ReducedAmountOfCalculation":
454 __reduceM, __avoidRC = False, True
455 elif performancePrf == "ReducedMemoryFootprint":
456 __reduceM, __avoidRC = True, False
457 elif performancePrf == "NoSavings":
458 __reduceM, __avoidRC = False, False
# Script input: load either a matrix or the Direct/Tangent/Adjoint functions
# from the given script module (branch selectors partly elided).
460 if asScript is not None:
461 __Matrix, __Function = None, None
463 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
465 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
466 __Function.update({"useApproximatedDerivatives":True})
467 __Function.update(__Parameters)
468 elif asThreeFunctions:
470 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
471 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
472 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
474 __Function.update(__Parameters)
# Direct (non-script) input: a single Direct function implies approximated
# derivatives; a Tangent/Adjoint pair is used as-is.
477 if asOneFunction is not None:
478 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
479 if asOneFunction["Direct"] is not None:
480 __Function = asOneFunction
482 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
484 __Function = { "Direct":asOneFunction }
485 __Function.update({"useApproximatedDerivatives":True})
486 __Function.update(__Parameters)
487 elif asThreeFunctions is not None:
488 if isinstance(asThreeFunctions, dict) and \
489 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
490 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
491 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
492 __Function = asThreeFunctions
493 elif isinstance(asThreeFunctions, dict) and \
494 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
495 __Function = asThreeFunctions
496 __Function.update({"useApproximatedDerivatives":True})
498 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Fall back to the Tangent function as Direct when Direct is not provided.
499 if "Direct" not in asThreeFunctions:
500 __Function["Direct"] = asThreeFunctions["Tangent"]
501 __Function.update(__Parameters)
505 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
506 # for k in ("Direct", "Tangent", "Adjoint"):
507 # if k in __Function and hasattr(__Function[k],"__class__"):
508 # if type(__Function[k]) is type(self.__init__):
509 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize appliedInX: a bare value is wrapped under the "HXb" key.
511 if appliedInX is not None and isinstance(appliedInX, dict):
512 __appliedInX = appliedInX
513 elif appliedInX is not None:
514 __appliedInX = {"HXb":appliedInX}
518 if scheduledBy is not None:
519 self.__T = scheduledBy
# Case 1: Direct function with approximated derivatives — build a finite
# difference approximation (FDA) providing Tangent and Adjoint operators.
521 if isinstance(__Function, dict) and \
522 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
523 ("Direct" in __Function) and (__Function["Direct"] is not None):
524 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
525 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
526 if "withdX" not in __Function: __Function["withdX"] = None
527 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
528 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
529 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
530 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
531 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
532 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Local import avoids a module-level import cycle with NumericObjects.
533 from daCore import NumericObjects
534 FDA = NumericObjects.FDApproximation(
536 Function = __Function["Direct"],
537 centeredDF = __Function["CenteredFiniteDifference"],
538 increment = __Function["DifferentialIncrement"],
539 dX = __Function["withdX"],
540 extraArguments = self.__extraArgs,
541 reducingMemoryUse = __Function["withReducingMemoryUse"],
542 avoidingRedundancy = __Function["withAvoidingRedundancy"],
543 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
544 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
545 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
546 mpWorkers = __Function["NumberOfProcesses"],
547 mfEnabled = __Function["withmfEnabled"],
549 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
550 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
551 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three exact functions are all provided by the user.
552 elif isinstance(__Function, dict) and \
553 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
554 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
555 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
556 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
557 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: linear operator as a matrix — the adjoint is its transpose.
558 elif asMatrix is not None:
559 if isinstance(__Matrix, str):
560 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
561 __matrice = numpy.asarray( __Matrix, dtype=float )
562 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
563 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
564 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
567 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the (possibly string-specified) operating-point vectors as columns.
569 if __appliedInX is not None:
570 self.__FO["AppliedInX"] = {}
571 for key in __appliedInX:
572 if isinstance(__appliedInX[key], str):
573 __appliedInX[key] = PlatformInfo.strvect2liststr( __appliedInX[key] )
574 self.__FO["AppliedInX"][key] = numpy.ravel( __appliedInX[key] ).reshape((-1,1))
576 self.__FO["AppliedInX"] = None
# Delegated representations (def __repr__/__str__ headers elided).
582 "x.__repr__() <==> repr(x)"
583 return repr(self.__FO)
586 "x.__str__() <==> str(x)"
587 return str(self.__FO)
589 # ==============================================================================
590 class Algorithm(object):
592 Classe générale d'interface de type algorithme
594 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
595 d'assimilation, en fournissant un container (dictionnaire) de variables
596 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
598 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# NOTE(review): elided extract — the class continues beyond this view
# (_pre_run and following methods); only comments are added here.
600 def __init__(self, name):
602 L'initialisation présente permet de fabriquer des variables de stockage
603 disponibles de manière générique dans les algorithmes élémentaires. Ces
604 variables de stockage sont ensuite conservées dans un dictionnaire
605 interne à l'objet, mais auquel on accède par la méthode "get".
607 Les variables prévues sont :
608 - APosterioriCorrelations : matrice de corrélations de la matrice A
609 - APosterioriCovariance : matrice de covariances a posteriori : A
610 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
611 - APosterioriVariances : vecteur des variances de la matrice A
612 - Analysis : vecteur d'analyse : Xa
613 - BMA : Background moins Analysis : Xa - Xb
614 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
615 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
616 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
617 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
618 - CostFunctionJo : partie observations de la fonction-coût : Jo
619 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
620 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
621 - CurrentOptimum : état optimal courant lors d'itérations
622 - CurrentState : état courant lors d'itérations
623 - CurrentStepNumber : numéro courant de pas de mesure dans les algorithmes temporels
624 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
625 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
626 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
627 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
628 - Innovation : l'innovation : d = Y - H(X)
629 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
630 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
631 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
632 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
633 - KalmanGainAtOptimum : gain de Kalman à l'optimum
634 - MahalanobisConsistency : indicateur de consistance des covariances
635 - OMA : Observation moins Analyse : Y - Xa
636 - OMB : Observation moins Background : Y - Xb
637 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
638 - ForecastState : état prédit courant lors d'itérations
639 - Residu : dans le cas des algorithmes de vérification
640 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
641 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
642 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
643 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
644 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
645 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
646 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
647 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
648 On peut rajouter des variables à stocker dans l'initialisation de
649 l'algorithme élémentaire qui va hériter de cette classe
# Internal bookkeeping: memory monitor, parameters, and the declarations of
# required/optional inputs that _pre_run checks later.
651 logging.debug("%s Initialisation", str(name))
652 self._m = PlatformInfo.SystemUsage()
654 self._name = str( name )
655 self._parameters = {"StoreSupplementaryCalculations":[]}
656 self.__internal_state = {}
657 self.__required_parameters = {}
658 self.__required_inputs = {
659 "RequiredInputValues":{"mandatory":(), "optional":()},
660 "ClassificationTags":[],
662 self.__variable_names_not_public = {"nextStep":False} # Duplicated in AlgorithmAndParameters
663 self.__canonical_parameter_name = {} # Mapping "lower" -> "correct"
664 self.__canonical_stored_name = {} # Mapping "lower" -> "correct"
# Registry of every storable variable, one Persistence container per name.
666 self.StoredVariables = {}
667 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
668 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
669 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
670 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
671 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
672 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
673 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
674 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
675 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
676 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
677 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
678 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
679 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
680 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
681 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
682 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
683 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
684 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
685 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
686 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
687 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
688 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
689 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
690 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
691 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
692 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
693 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
694 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
695 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
696 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
697 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
698 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
699 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
700 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
701 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
702 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
703 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
704 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
705 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
706 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
707 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
708 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
709 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the case-insensitive lookup tables so users can address stored
# variables and parameters without matching the exact capitalization.
711 for k in self.StoredVariables:
712 self.__canonical_stored_name[k.lower()] = k
714 for k, v in self.__variable_names_not_public.items():
715 self.__canonical_parameter_name[k.lower()] = k
716 self.__canonical_parameter_name["algorithm"] = "Algorithm"
717 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
719 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
721 logging.debug("%s Lancement", self._name)
722 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
723 self._getTimeState(reset=True)
725 # Mise a jour des paramètres internes avec le contenu de Parameters, en
726 # reprenant les valeurs par défauts pour toutes celles non définies
727 self.__setParameters(Parameters, reset=True)
728 for k, v in self.__variable_names_not_public.items():
729 if k not in self._parameters: self.__setParameters( {k:v} )
731 # Corrections et compléments des vecteurs
732 def __test_vvalue(argument, variable, argname, symbol=None):
733 if symbol is None: symbol = variable
735 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
736 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
737 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
738 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
740 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
742 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
743 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
744 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
745 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
747 logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
749 __test_vvalue( Xb, "Xb", "Background or initial state" )
750 __test_vvalue( Y, "Y", "Observation" )
751 __test_vvalue( U, "U", "Control" )
753 # Corrections et compléments des covariances
754 def __test_cvalue(argument, variable, argname, symbol=None):
755 if symbol is None: symbol = variable
757 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
758 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
759 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
760 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
762 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
764 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
765 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
766 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
767 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
769 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
771 __test_cvalue( B, "B", "Background" )
772 __test_cvalue( R, "R", "Observation" )
773 __test_cvalue( Q, "Q", "Evolution" )
775 # Corrections et compléments des opérateurs
776 def __test_ovalue(argument, variable, argname, symbol=None):
777 if symbol is None: symbol = variable
778 if argument is None or (isinstance(argument,dict) and len(argument)==0):
779 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
780 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
781 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
782 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
784 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
786 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
787 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
788 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
789 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
791 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
793 __test_ovalue( HO, "HO", "Observation", "H" )
794 __test_ovalue( EM, "EM", "Evolution", "M" )
795 __test_ovalue( CM, "CM", "Control Model", "C" )
797 # Corrections et compléments des bornes
798 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
799 logging.debug("%s Bounds taken into account"%(self._name,))
801 self._parameters["Bounds"] = None
802 if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
803 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
804 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
806 # Corrections et compléments de l'initialisation en X
807 if "InitializationPoint" in self._parameters:
809 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
810 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
811 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
812 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
813 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
815 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
817 if self._parameters["InitializationPoint"] is None:
818 raise ValueError("Forced initial point can not be set without any given Background or required value")
820 # Correction pour pallier a un bug de TNC sur le retour du Minimum
821 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
822 self.setParameterValue("StoreInternalVariables",True)
824 # Verbosité et logging
825 if logging.getLogger().level < logging.WARNING:
826 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
827 self._parameters["optmessages"] = 15
829 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
830 self._parameters["optmessages"] = 0
834 def _post_run(self,_oH=None):
836 if ("StoreSupplementaryCalculations" in self._parameters) and \
837 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
838 for _A in self.StoredVariables["APosterioriCovariance"]:
839 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
840 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
841 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
842 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
843 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
844 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
845 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
846 self.StoredVariables["APosterioriCorrelations"].store( _C )
847 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
848 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
849 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
850 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
851 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
852 logging.debug("%s Terminé", self._name)
855 def _toStore(self, key):
856 "True if in StoreSupplementaryCalculations, else False"
857 return key in self._parameters["StoreSupplementaryCalculations"]
def get(self, key=None):
    """
    Return one stored variable identified by its (case-insensitive) key, or
    the whole dictionary of available variables when no key is given. The
    variables themselves are returned as persistence objects, so individual
    access methods are those of the persistence classes.
    """
    if key is not None:
        return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
    else:
        return self.StoredVariables
872 def __contains__(self, key=None):
873 "D.__contains__(k) -> True if D has a key k, else False"
874 if key is None or key.lower() not in self.__canonical_stored_name:
877 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
def keys(self):
    "D.keys() -> list of D's keys"
    if hasattr(self, "StoredVariables"):
        return self.StoredVariables.keys()
    else:
        return []

def pop(self, k, d):
    "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
    if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
        return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
    else:
        # Mimic dict.pop error behaviour when no storage is available
        try:
            msg = "'%s'"%k
        except Exception:
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
        try:
            return d
        except Exception:
            raise KeyError(msg)
def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
    """
    Must implement the elementary assimilation computation in its most
    natural mathematical form. To be overloaded by each concrete algorithm.
    """
    raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
    """
    Declare, in the algorithm, a required parameter and its default
    characteristics (default value, typecast, bounds, allowed values and
    user message). Raises ValueError when no name is given.
    """
    if name is None:
        raise ValueError("A name is mandatory to define a required parameter.")
    #
    self.__required_parameters[name] = {
        "default"  : default,
        "typecast" : typecast,
        "minval"   : minval,
        "maxval"   : maxval,
        "listval"  : listval,
        "listadv"  : listadv,
        "message"  : message,
        }
    # Case-insensitive lookup of the canonical parameter name
    self.__canonical_parameter_name[name.lower()] = name
    logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
def getRequiredParameters(self, noDetails=True):
    """
    Return the sorted list of required parameter names, or the full
    dictionary of required parameter declarations when noDetails is False.
    """
    if noDetails:
        return sorted(self.__required_parameters.keys())
    else:
        return self.__required_parameters
def setParameterValue(self, name=None, value=None):
    """
    Return the value of a required parameter in a controlled way: the given
    value (or, failing that, the declared default) is typecast, then checked
    against the declared min/max bounds and allowed value lists.
    Raises ValueError on typecast failure or constraint violation.
    """
    __k = self.__canonical_parameter_name[name.lower()]
    default = self.__required_parameters[__k]["default"]
    typecast = self.__required_parameters[__k]["typecast"]
    minval = self.__required_parameters[__k]["minval"]
    maxval = self.__required_parameters[__k]["maxval"]
    listval = self.__required_parameters[__k]["listval"]
    listadv = self.__required_parameters[__k]["listadv"]
    #
    if value is None and default is None:
        __val = None
    elif value is None and default is not None:
        if typecast is None: __val = default
        else: __val = typecast( default )
    else:
        if typecast is None: __val = value
        else:
            try:
                __val = typecast( value )
            except Exception:
                raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
    #
    if minval is not None and (numpy.array(__val, float) < minval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
    if maxval is not None and (numpy.array(__val, float) > maxval).any():
        raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
    if listval is not None or listadv is not None:
        # Sequence values are checked element by element, scalars directly
        if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
            for v in __val:
                if listval is not None and v in listval: continue
                elif listadv is not None and v in listadv: continue
                else:
                    raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
        elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
            raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    #
    return __val
def requireInputArguments(self, mandatory=(), optional=()):
    """
    Register the names of the computation input arguments that are
    mandatory or optional for the algorithm.
    """
    self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
    self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
def getInputArguments(self):
    """
    Return the two tuples (mandatory, optional) of the required computation
    input argument names.
    """
    return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
def setAttributes(self, tags=()):
    """
    Append classification tag attributes and return the current full list
    of tags in every case.
    """
    self.__required_inputs["ClassificationTags"].extend( tags )
    return self.__required_inputs["ClassificationTags"]
1000 def __setParameters(self, fromDico={}, reset=False):
1002 Permet de stocker les paramètres reçus dans le dictionnaire interne.
1004 self._parameters.update( fromDico )
1005 __inverse_fromDico_keys = {}
1006 for k in fromDico.keys():
1007 if k.lower() in self.__canonical_parameter_name:
1008 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1009 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1010 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1011 for k in self.__required_parameters.keys():
1012 if k in __canonic_fromDico_keys:
1013 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1015 self._parameters[k] = self.setParameterValue(k)
1018 if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1019 logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1021 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1023 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1025 Permet de stocker des variables nommées constituant l'état interne
1027 if reset: # Vide le dictionnaire préalablement
1028 self.__internal_state = {}
1029 if key is not None and value is not None:
1030 self.__internal_state[key] = value
1031 self.__internal_state.update( dict(fromDico) )
1033 def _getInternalState(self, key=None):
1035 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1037 if key is not None and key in self.__internal_state:
1038 return self.__internal_state[key]
1040 return self.__internal_state
1042 def _getTimeState(self, reset=False):
1044 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1047 self.__initial_cpu_time = time.process_time()
1048 self.__initial_elapsed_time = time.perf_counter()
1051 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1052 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1053 return self.__cpu_time, self.__elapsed_time
1055 def _StopOnTimeLimit(self, X=None, withReason=False):
1056 "Stop criteria on time limit: True/False [+ Reason]"
1057 c, e = self._getTimeState()
1058 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1059 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1060 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1061 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1063 __SC, __SR = False, ""
1069 # ==============================================================================
class PartialAlgorithm(object):
    """
    Mimics "Algorithm" from the storage point of view, but without any
    advanced action such as checking. For the methods reproduced here,
    the behaviour is identical to the ones of the "Algorithm" class.
    """
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        #
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        #
        # Case-insensitive lookup table for the stored variable names
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k

    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]

    def get(self, key=None):
        """
        Return one stored variable identified by its (case-insensitive) key,
        or the whole dictionary of stored variables when no key is given.
        """
        if key is not None:
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
        else:
            return self.StoredVariables
1109 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    General action interface class for the algorithm and its parameters
    """
    def __init__(self,
                 name               = "GenericAlgorithm",
                 asAlgorithm        = None,
                 asDict             = None,
                 asScript           = None,
                ):
        """
        Record the algorithm choice (given directly or through a user
        script) and the user parameters, then load and instantiate the
        elementary algorithm. NOTE(review): the keyword arguments
        asAlgorithm/asDict/asScript were restored; this listing had lost
        the signature continuation lines.
        """
        self.__name = str(name)
        self.__A = None
        self.__P = {}
        #
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )
        #
        self.__variable_names_not_public = {"nextStep":False} # Duplication from Algorithm
def updateParameters(self,
                     asDict = None,
                     asScript = None,
                    ):
    """
    Update the parameters, either from the given dictionary or by reading
    the "Parameters" variable from a user script.
    """
    if asDict is None and asScript is not None:
        __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    else:
        __Dict = asDict
    #
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
def executePythonScheme(self, asDictAO = None):
    """
    Launch the assimilation computation from a dictionary of ADAO objects:
    unwrap each input (through getO when available), validate the shapes,
    then run the chosen algorithm.
    """
    Operator.CM.clearCache()
    #
    if not isinstance(asDictAO, dict):
        raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
    if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
    elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
    else: self.__Xb = None
    if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
    else: self.__Y = asDictAO["Observation"]
    if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
    else: self.__U = asDictAO["ControlInput"]
    if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
    else: self.__HO = asDictAO["ObservationOperator"]
    if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
    else: self.__EM = asDictAO["EvolutionModel"]
    if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
    else: self.__CM = asDictAO["ControlModel"]
    self.__B = asDictAO["BackgroundError"]
    self.__R = asDictAO["ObservationError"]
    self.__Q = asDictAO["EvolutionError"]
    #
    self.__shape_validate()
    #
    self.__algorithm.run(
        Xb         = self.__Xb,
        Y          = self.__Y,
        U          = self.__U,
        HO         = self.__HO,
        EM         = self.__EM,
        CM         = self.__CM,
        R          = self.__R,
        B          = self.__B,
        Q          = self.__Q,
        Parameters = self.__P,
        )
def executeYACSScheme(self, FileName=None):
    """
    Launch the assimilation computation through a YACS scheme XML file.
    Requires a loaded SALOME/YACS/ADAO environment.
    NOTE(review): several structural lines (else/try/import/execution
    lines) were lost in this listing and are restored here; to be
    confirmed against the reference implementation.
    """
    if FileName is None or not os.path.exists(FileName):
        raise ValueError("a YACS file name has to be given for YACS execution.\n")
    else:
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
    if not PlatformInfo.has_salome or \
        not PlatformInfo.has_yacs or \
        not PlatformInfo.has_adao:
        raise ImportError("\n\n"+\
            "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
            "Please load the right environnement before trying to use it.\n")
    #
    # SALOME/YACS modules are only importable inside the SALOME environment
    import pilot
    import SALOMERuntime
    import loader
    SALOMERuntime.RuntimeSALOME_setRuntime()
    #
    r = pilot.getRuntime()
    xmlLoader = loader.YACSLoader()
    xmlLoader.registerProcCataLoader()
    try:
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
    except Exception:
        pass
    #
    try:
        p = xmlLoader.load(__file)
    except IOError as ex:
        print("The YACS XML schema file can not be loaded: %s"%(ex,))
    #
    logger = p.getLogger("parser")
    if not logger.isEmpty():
        print("The imported YACS XML schema has errors on parsing:")
        print(logger.getStr())
    #
    if not p.isValid():
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
    #
    info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
    p.checkConsistency(info)
    if info.areWarningsOrErrors():
        print("The YACS XML schema is not coherent and will not be executed:")
        print(info.getGlobalRepr())
    #
    e = pilot.ExecutorSwig()
    e.RunW(p)
    if p.getEffectiveState() != pilot.DONE:
        print(p.getErrorReport())
    #
    return 0
def get(self, key = None):
    """
    Return a stored algorithm variable or a parameter identified by key;
    without key, return all the parameters with the non-public internal
    names filtered out.
    """
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        return self.__P[key]
    else:
        allvariables = self.__P
        for k in self.__variable_names_not_public: allvariables.pop(k, None)
        return allvariables
def pop(self, k, d):
    "Needed for pickling support: delegate the removal to the wrapped algorithm."
    wrapped = self.__algorithm
    return wrapped.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the names (or the full details) of the parameters required by the algorithm."
    chosen = self.__algorithm
    return chosen.getRequiredParameters(noDetails)

def getAlgorithmInputArguments(self):
    "Return the input argument lists required by the algorithm."
    chosen = self.__algorithm
    return chosen.getInputArguments()

def getAlgorithmAttributes(self):
    "Return the list of classification attributes of the algorithm."
    chosen = self.__algorithm
    return chosen.setAttributes()
def setObserver(self, __V, __O, __I, __S):
    """
    Attach the observer function __O (with parameters __I and scheduler
    __S) to the stored variable named __V of the chosen algorithm.
    Raises ValueError if no algorithm is chosen or the variable is unknown.
    """
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be build before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
    else:
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            Scheduler      = __S,
            HookFunction   = __O,
            HookParameters = __I,
            )
def removeObserver(self, __V, __O, __A = False):
    """
    Remove the observer __O (or all observers when __A is True) from the
    stored variable named __V of the chosen algorithm.
    Raises ValueError if no algorithm is chosen or the variable is unknown.
    """
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be removed before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
    else:
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            HookFunction   = __O,
            AllObservers   = __A,
            )
def hasObserver(self, __V):
    """
    Tell whether the stored variable named __V of the chosen algorithm has
    a data observer. Returns False when no usable algorithm is chosen or
    the variable does not exist.
    """
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        return False
    if __V not in self.__algorithm:
        return False
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
def keys(self):
    "D.keys() -> list of D's keys"
    # Merge the algorithm variable names and the parameter names, then
    # filter out the non-public internal names
    __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
    for k in self.__variable_names_not_public:
        if k in __allvariables: __allvariables.remove(k)
    return __allvariables
1321 def __contains__(self, key=None):
1322 "D.__contains__(k) -> True if D has a key k, else False"
1323 return key in self.__algorithm or key in self.__P
1326 "x.__repr__() <==> repr(x)"
1327 return repr(self.__A)+", "+repr(self.__P)
1330 "x.__str__() <==> str(x)"
1331 return str(self.__A)+", "+str(self.__P)
1333 def __setAlgorithm(self, choice = None ):
1335 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1336 d'assimilation. L'argument est un champ caractère se rapportant au nom
1337 d'un algorithme réalisant l'opération sur les arguments fixes.
1340 raise ValueError("Error: algorithm choice has to be given")
1341 if self.__algorithmName is not None:
1342 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1343 daDirectory = "daAlgorithms"
1345 # Recherche explicitement le fichier complet
1346 # ------------------------------------------
1348 for directory in sys.path:
1349 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1350 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1351 if module_path is None:
1352 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1354 # Importe le fichier complet comme un module
1355 # ------------------------------------------
1357 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1358 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1359 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1360 raise ImportError("this module does not define a valid elementary algorithm.")
1361 self.__algorithmName = str(choice)
1362 sys.path = sys_path_tmp ; del sys_path_tmp
1363 except ImportError as e:
1364 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1366 # Instancie un objet du type élémentaire du fichier
1367 # -------------------------------------------------
1368 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1371 def __shape_validate(self):
1373 Validation de la correspondance correcte des tailles des variables et
1374 des matrices s'il y en a.
1376 if self.__Xb is None: __Xb_shape = (0,)
1377 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1378 elif hasattr(self.__Xb,"shape"):
1379 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1380 else: __Xb_shape = self.__Xb.shape()
1381 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1383 if self.__Y is None: __Y_shape = (0,)
1384 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1385 elif hasattr(self.__Y,"shape"):
1386 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1387 else: __Y_shape = self.__Y.shape()
1388 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1390 if self.__U is None: __U_shape = (0,)
1391 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1392 elif hasattr(self.__U,"shape"):
1393 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1394 else: __U_shape = self.__U.shape()
1395 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1397 if self.__B is None: __B_shape = (0,0)
1398 elif hasattr(self.__B,"shape"):
1399 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1400 else: __B_shape = self.__B.shape()
1401 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1403 if self.__R is None: __R_shape = (0,0)
1404 elif hasattr(self.__R,"shape"):
1405 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1406 else: __R_shape = self.__R.shape()
1407 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1409 if self.__Q is None: __Q_shape = (0,0)
1410 elif hasattr(self.__Q,"shape"):
1411 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1412 else: __Q_shape = self.__Q.shape()
1413 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1415 if len(self.__HO) == 0: __HO_shape = (0,0)
1416 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1417 elif hasattr(self.__HO["Direct"],"shape"):
1418 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1419 else: __HO_shape = self.__HO["Direct"].shape()
1420 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1422 if len(self.__EM) == 0: __EM_shape = (0,0)
1423 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1424 elif hasattr(self.__EM["Direct"],"shape"):
1425 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1426 else: __EM_shape = self.__EM["Direct"].shape()
1427 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1429 if len(self.__CM) == 0: __CM_shape = (0,0)
1430 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1431 elif hasattr(self.__CM["Direct"],"shape"):
1432 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1433 else: __CM_shape = self.__CM["Direct"].shape()
1434 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1436 # Vérification des conditions
1437 # ---------------------------
1438 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1439 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1440 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1441 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1443 if not( min(__B_shape) == max(__B_shape) ):
1444 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1445 if not( min(__R_shape) == max(__R_shape) ):
1446 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1447 if not( min(__Q_shape) == max(__Q_shape) ):
1448 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1449 if not( min(__EM_shape) == max(__EM_shape) ):
1450 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1452 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1453 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1454 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1455 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1456 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1457 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1458 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1459 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1461 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1462 if self.__algorithmName in ["EnsembleBlue",]:
1463 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1464 self.__Xb = Persistence.OneVector("Background")
1465 for member in asPersistentVector:
1466 self.__Xb.store( numpy.asarray(member, dtype=float) )
1467 __Xb_shape = min(__B_shape)
1469 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1471 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1472 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1474 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1475 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1477 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1478 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1480 if ("Bounds" in self.__P) \
1481 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1482 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1483 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1484 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1486 if ("StateBoundsForQuantiles" in self.__P) \
1487 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1488 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1489 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1490 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1494 # ==============================================================================
1495 class RegulationAndParameters(object):
1497     General interface class for a regulation action and its parameters
# NOTE(review): several original lines are elided in this excerpt (numbering
# jumps, e.g. the missing "else:" branches and parts of the signatures);
# only comments/docstrings were changed here, code bytes are untouched.
1500 name = "GenericRegulation",
1507 self.__name = str(name)
# Resolve the algorithm: either loaded from a user script, or given directly.
1510 if asAlgorithm is None and asScript is not None:
1511 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1513 __Algo = asAlgorithm
# Resolve the parameter dictionary the same way (script wins when asDict is None).
1515 if asDict is None and asScript is not None:
1516 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
# Merge the collected parameters into the internal parameter store self.__P.
1520 if __Dict is not None:
1521 self.__P.update( dict(__Dict) )
1523 if __Algo is not None:
1524 self.__P.update( {"Algorithm":str(__Algo)} )
1526 def get(self, key = None):
1527 "Return the stored value for a variable or parameter key"
1529 return self.__P[key]
1533 # ==============================================================================
1534 class DataObserver(object):
1536     General observer-type interface class
# NOTE(review): elided lines (numbering gaps) hide parts of the signature and
# some "else:" branches; code bytes below are unchanged, only comments added.
1539 name = "GenericObserver",
1551 self.__name = str(name)
# Normalize the observed variable name(s) into the tuple self.__V, and the
# associated info strings into self.__I (same length as self.__V).
1556 if onVariable is None:
1557 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1558 elif type(onVariable) in (tuple, list):
1559 self.__V = tuple(map( str, onVariable ))
1560 if withInfo is None:
1563 self.__I = (str(withInfo),)*len(self.__V)
1564 elif isinstance(onVariable, str):
1565 self.__V = (onVariable,)
1566 if withInfo is None:
1567 self.__I = (onVariable,)
1569 self.__I = (str(withInfo),)
1571 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# The observer callable is either given directly, or built from user-supplied
# text through UserScript + Observer2Func.
1573 if asObsObject is not None:
1574 self.__O = asObsObject
1576 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1577 __Function = Observer2Func(__FunctionText)
1578 self.__O = __Function.getfunc()
# Register the observer on each requested variable of the algorithm object.
1580 for k in range(len(self.__V)):
1583 if ename not in withAlgo:
1584 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1586 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1589 "x.__repr__() <==> repr(x)"
1590 return repr(self.__V)+"\n"+repr(self.__O)
1593 "x.__str__() <==> str(x)"
1594 return str(self.__V)+"\n"+str(self.__O)
1596 # ==============================================================================
1597 class UserScript(object):
1599     General interface class for user script text
# Resolves the script text self.__F from, in priority order: an explicit
# string, a named template (UserPostAnalysis/Observer), or a script file.
# NOTE(review): the branch bodies at elided lines (e.g. 1612) are not visible
# in this excerpt; code bytes are unchanged.
1602 name = "GenericUserScript",
1609 self.__name = str(name)
1611 if asString is not None:
1613 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1614 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1615 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1616 self.__F = Templates.ObserverTemplates[asTemplate]
1617 elif asScript is not None:
1618 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1623 "x.__repr__() <==> repr(x)"
1624 return repr(self.__F)
1627 "x.__str__() <==> str(x)"
1628 return str(self.__F)
1630 # ==============================================================================
1631 class ExternalParameters(object):
1633     General interface class for external user parameters
# Thin dict-like wrapper around the internal parameter store self.__P.
1636 name = "GenericExternalParameters",
1642 self.__name = str(name)
1645 self.updateParameters( asDict, asScript )
1647 def updateParameters(self,
1651 "Update the parameters"
# Parameters come either from an explicit dict or from a user script variable.
1652 if asDict is None and asScript is not None:
1653 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1657 if __Dict is not None:
1658 self.__P.update( dict(__Dict) )
1660 def get(self, key = None):
1662 return self.__P[key]
# NOTE(review): the two identical list(self.__P.keys()) returns below belong to
# two different elided method definitions (e.g. keys()/has_key-style accessors).
1664 return list(self.__P.keys())
1667 return list(self.__P.keys())
1669 def pop(self, k, d):
1670 return self.__P.pop(k, d)
1673 return self.__P.items()
1675 def __contains__(self, key=None):
1676 "D.__contains__(k) -> True if D has a key k, else False"
1677 return key in self.__P
1679 # ==============================================================================
1680 class State(object):
1682     General state-type interface class
1685 name = "GenericVector",
1687 asPersistentVector = None,
1693 toBeChecked = False,
1696 Allows the definition of a vector :
1697 - asVector : data input, as a vector compatible with the
1698 numpy.matrix constructor, or "True" if input comes from a script.
1699 - asPersistentVector : data input, as a series of vectors
1700 compatible with the numpy.matrix constructor, or as an object of
1701 Persistence type, or "True" if input comes from a script.
1702 - asScript : if a valid script is given containing a variable
1703 named "name", the variable is of type "asVector" (by default) or
1704 "asPersistentVector" depending on which of these variables is set
1706 - asDataFile : if one or more valid files are given,
1707 containing values in columns, themselves named by "colNames"
1708 (if no column name is indicated, a column named
1709 "name" is looked up), the columns are retrieved and stored row by
1710 row (colMajor=False, by default) or column by column
1711 (colMajor=True). The resulting variable is of type "asVector" (by
1712 default) or "asPersistentVector" depending on which of these variables is
1715 self.__name = str(name)
1716 self.__check = bool(toBeChecked)
1720 self.__is_vector = False
1721 self.__is_series = False
# Gather the raw data into __Vector (single state) or __Series (sequence of
# states), from a script, from data file(s), or from the direct arguments.
1723 if asScript is not None:
1724 __Vector, __Series = None, None
1725 if asPersistentVector:
1726 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1728 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1729 elif asDataFile is not None:
1730 __Vector, __Series = None, None
1731 if asPersistentVector:
1732 if colNames is not None:
1733 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1735 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already stored column-major, hence the inverted transpose logic.
# NOTE(review): assumption inferred from the two symmetric branches — confirm.
1736 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1737 __Series = numpy.transpose(__Series)
1738 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1739 __Series = numpy.transpose(__Series)
1741 if colNames is not None:
1742 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1744 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1746 __Vector = numpy.ravel(__Vector, order = "F")
1748 __Vector = numpy.ravel(__Vector, order = "C")
1750 __Vector, __Series = asVector, asPersistentVector
# Store as a single column vector (shape (n,1)) ...
1752 if __Vector is not None:
1753 self.__is_vector = True
1754 if isinstance(__Vector, str):
1755 __Vector = PlatformInfo.strvect2liststr( __Vector )
1756 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1757 self.shape = self.__V.shape
1758 self.size = self.__V.size
# ... or as a Persistence.OneVector series of vectors.
1759 elif __Series is not None:
1760 self.__is_series = True
1761 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1762 self.__V = Persistence.OneVector(self.__name)
1763 if isinstance(__Series, str):
1764 __Series = PlatformInfo.strmatrix2liststr(__Series)
1765 for member in __Series:
1766 if isinstance(member, str):
1767 member = PlatformInfo.strvect2liststr( member )
1768 self.__V.store(numpy.asarray( member, dtype=float ))
# The stored object may expose shape as an attribute or as a callable.
1771 if isinstance(self.__V.shape, (tuple, list)):
1772 self.shape = self.__V.shape
1774 self.shape = self.__V.shape()
1775 if len(self.shape) == 1:
1776 self.shape = (self.shape[0],1)
1777 self.size = self.shape[0] * self.shape[1]
1779 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1781 if scheduledBy is not None:
1782 self.__T = scheduledBy
1784 def getO(self, withScheduler=False):
1786 return self.__V, self.__T
1787 elif self.__T is None:
1793 "Internal type check"
1794 return self.__is_vector
1797 "Internal type check"
1798 return self.__is_series
1801 "x.__repr__() <==> repr(x)"
1802 return repr(self.__V)
1805 "x.__str__() <==> str(x)"
1806 return str(self.__V)
1808 # ==============================================================================
1809 class Covariance(object):
1811     General covariance-type interface class
# A covariance can be held in one of four internal representations (exactly one
# of the self.__is_* flags is True): full matrix, diagonal vector, scalar
# multiple of identity, or an arbitrary user object exposing matrix-like
# methods. All algebraic methods below dispatch on that representation.
# NOTE(review): this excerpt elides lines (numbering gaps): "else:"/"try:"
# lines and several "def" headers are missing from view; code bytes unchanged.
1814 name = "GenericCovariance",
1815 asCovariance = None,
1816 asEyeByScalar = None,
1817 asEyeByVector = None,
1820 toBeChecked = False,
1823 Allows the definition of a covariance :
1824 - asCovariance : data input, as a matrix compatible with
1825 the numpy.matrix constructor
1826 - asEyeByScalar : data input as a single variance scalar,
1827 multiplying an identity correlation matrix, so that no matrix
1828 has to be explicitly given
1829 - asEyeByVector : data input as a single variance vector,
1830 to be put on the diagonal of a correlation matrix, so that no matrix
1831 has to be explicitly given
1832 - asCovObject : data input as a python object, which has the
1833 mandatory methods "getT", "getI", "diag", "trace", "__add__",
1834 "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
1835 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1836 - toBeChecked : boolean indicating whether the SPD character of the full
1837 matrix has to be checked
1839 self.__name = str(name)
1840 self.__check = bool(toBeChecked)
1843 self.__is_scalar = False
1844 self.__is_vector = False
1845 self.__is_matrix = False
1846 self.__is_object = False
# Load the raw data from a script when requested, else from the arguments.
1848 if asScript is not None:
1849 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1851 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1853 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1855 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1857 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1859 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Priority order of representations: scalar, then vector, matrix, object.
1861 if __Scalar is not None:
1862 if isinstance(__Scalar, str):
1863 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1864 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1865 if numpy.array(__Scalar).size != 1:
1866 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1867 self.__is_scalar = True
1868 self.__C = numpy.abs( float(__Scalar) )
1871 elif __Vector is not None:
1872 if isinstance(__Vector, str):
1873 __Vector = PlatformInfo.strvect2liststr( __Vector )
1874 self.__is_vector = True
1875 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1876 self.shape = (self.__C.size,self.__C.size)
1877 self.size = self.__C.size**2
1878 elif __Matrix is not None:
1879 self.__is_matrix = True
# NOTE(review): numpy.matrix/asmatrix are deprecated in recent NumPy; kept
# as-is here since callers may rely on matrix semantics — confirm before porting.
1880 self.__C = numpy.matrix( __Matrix, float )
1881 self.shape = self.__C.shape
1882 self.size = self.__C.size
1883 elif __Object is not None:
1884 self.__is_object = True
# A user-supplied covariance object must provide the full matrix-like protocol.
1886 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1887 if not hasattr(self.__C,at):
1888 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1889 if hasattr(self.__C,"shape"):
1890 self.shape = self.__C.shape
1893 if hasattr(self.__C,"size"):
1894 self.size = self.__C.size
1899 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
# Sanity checks: set, square, and positive (SPD checked only on demand or when
# logging is verbose, since a Cholesky factorization can be costly).
1903 def __validate(self):
1905 if self.__C is None:
1906 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1907 if self.ismatrix() and min(self.shape) != max(self.shape):
1908 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1909 if self.isobject() and min(self.shape) != max(self.shape):
1910 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1911 if self.isscalar() and self.__C <= 0:
1912 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1913 if self.isvector() and (self.__C <= 0).any():
1914 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1915 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1917 numpy.linalg.cholesky( self.__C )
1919 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1920 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1924 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1927 "Internal type check"
1928 return self.__is_scalar
1931 "Internal type check"
1932 return self.__is_vector
1935 "Internal type check"
1936 return self.__is_matrix
1939 "Internal type check"
1940 return self.__is_object
# getI: inverse covariance, returned as a new Covariance in the same representation.
1945 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1946 elif self.isvector():
1947 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1948 elif self.isscalar():
1949 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1950 elif self.isobject() and hasattr(self.__C,"getI"):
1951 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1953 return None # Indispensable
# getT: transpose (a no-op for diagonal/scalar representations).
1958 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1959 elif self.isvector():
1960 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1961 elif self.isscalar():
1962 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1963 elif self.isobject() and hasattr(self.__C,"getT"):
1964 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1966 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1969 "Cholesky decomposition"
1971 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1972 elif self.isvector():
1973 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1974 elif self.isscalar():
1975 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1976 elif self.isobject() and hasattr(self.__C,"cholesky"):
1977 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1979 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1981 def choleskyI(self):
1982 "Inverse of the Cholesky decomposition"
1984 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1985 elif self.isvector():
1986 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1987 elif self.isscalar():
1988 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1989 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1990 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1992 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1995 "Matrix square root"
# Only the real part is kept: sqrtm of an SPD matrix is real up to round-off.
1998 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1999 elif self.isvector():
2000 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2001 elif self.isscalar():
2002 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2003 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2004 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2006 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2009 "Inverse of the matrix square root"
2012 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2013 elif self.isvector():
2014 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2015 elif self.isscalar():
2016 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2017 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2018 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2020 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2022 def diag(self, msize=None):
2023 "Diagonal of the matrix"
2025 return numpy.diag(self.__C)
2026 elif self.isvector():
2028 elif self.isscalar():
# Scalar case needs an explicit size to expand the diagonal.
2030 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2032 return self.__C * numpy.ones(int(msize))
2033 elif self.isobject() and hasattr(self.__C,"diag"):
2034 return self.__C.diag()
2036 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2038 def trace(self, msize=None):
2039 "Trace of the matrix"
2041 return numpy.trace(self.__C)
2042 elif self.isvector():
2043 return float(numpy.sum(self.__C))
2044 elif self.isscalar():
2046 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2048 return self.__C * int(msize)
2049 elif self.isobject():
2050 return self.__C.trace()
2052 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2054 def asfullmatrix(self, msize=None):
2057 return numpy.asarray(self.__C, dtype=float)
2058 elif self.isvector():
2059 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2060 elif self.isscalar():
2062 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2064 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2065 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2066 return self.__C.asfullmatrix()
2068 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2070 def assparsematrix(self):
2078 "x.__repr__() <==> repr(x)"
2079 return repr(self.__C)
2082 "x.__str__() <==> str(x)"
2083 return str(self.__C)
2085 def __add__(self, other):
2086 "x.__add__(y) <==> x+y"
2087 if self.ismatrix() or self.isobject():
2088 return self.__C + numpy.asmatrix(other)
2089 elif self.isvector() or self.isscalar():
2090 _A = numpy.asarray(other)
2091 if len(_A.shape) == 1:
# NOTE(review): for a 1-D operand this [::2] stride adds the variance to every
# OTHER element of the column, not the full diagonal — looks suspicious; the
# 2-D branch below uses the standard diagonal stride shape[1]+1. Confirm intent.
2092 _A.reshape((-1,1))[::2] += self.__C
2094 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2095 return numpy.asmatrix(_A)
2097 def __radd__(self, other):
2098 "x.__radd__(y) <==> y+x"
2099 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2101 def __sub__(self, other):
2102 "x.__sub__(y) <==> x-y"
2103 if self.ismatrix() or self.isobject():
2104 return self.__C - numpy.asmatrix(other)
2105 elif self.isvector() or self.isscalar():
2106 _A = numpy.asarray(other)
# NOTE(review): _A.shape[1] raises IndexError for a 1-D operand — unlike
# __add__, no 1-D branch exists here; presumably callers always pass 2-D.
2107 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2108 return numpy.asmatrix(_A)
2110 def __rsub__(self, other):
2111 "x.__rsub__(y) <==> y-x"
2112 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2115 "x.__neg__() <==> -x"
2118 def __matmul__(self, other):
2119 "x.__mul__(y) <==> x@y"
2120 if self.ismatrix() and isinstance(other, (int, float)):
2121 return numpy.asarray(self.__C) * other
2122 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2123 if numpy.ravel(other).size == self.shape[1]: # Vector
2124 return numpy.ravel(self.__C @ numpy.ravel(other))
2125 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2126 return numpy.asarray(self.__C) @ numpy.asarray(other)
2128 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2129 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2130 if numpy.ravel(other).size == self.shape[1]: # Vector
2131 return numpy.ravel(self.__C) * numpy.ravel(other)
2132 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2133 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2135 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2136 elif self.isscalar() and isinstance(other,numpy.matrix):
2137 return numpy.asarray(self.__C * other)
2138 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2139 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2140 return self.__C * numpy.ravel(other)
2142 return self.__C * numpy.asarray(other)
2143 elif self.isobject():
2144 return self.__C.__matmul__(other)
2146 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2148 def __mul__(self, other):
2149 "x.__mul__(y) <==> x*y"
2150 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2151 return self.__C * other
2152 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2153 if numpy.ravel(other).size == self.shape[1]: # Vector
2154 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2155 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2156 return self.__C * numpy.asmatrix(other)
2158 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2159 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2160 if numpy.ravel(other).size == self.shape[1]: # Vector
2161 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2162 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2163 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2165 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2166 elif self.isscalar() and isinstance(other,numpy.matrix):
2167 return self.__C * other
2168 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2169 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2170 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2172 return self.__C * numpy.asmatrix(other)
2173 elif self.isobject():
2174 return self.__C.__mul__(other)
2176 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2178 def __rmatmul__(self, other):
2179 "x.__rmul__(y) <==> y@x"
2180 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2181 return other * self.__C
2182 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2183 if numpy.ravel(other).size == self.shape[1]: # Vector
2184 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2185 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2186 return numpy.asmatrix(other) * self.__C
2188 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2189 elif self.isvector() and isinstance(other,numpy.matrix):
2190 if numpy.ravel(other).size == self.shape[0]: # Vector
2191 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2192 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2193 return numpy.asmatrix(numpy.array(other) * self.__C)
2195 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2196 elif self.isscalar() and isinstance(other,numpy.matrix):
2197 return other * self.__C
2198 elif self.isobject():
2199 return self.__C.__rmatmul__(other)
2201 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2203 def __rmul__(self, other):
2204 "x.__rmul__(y) <==> y*x"
2205 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2206 return other * self.__C
2207 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2208 if numpy.ravel(other).size == self.shape[1]: # Vector
2209 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2210 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2211 return numpy.asmatrix(other) * self.__C
2213 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2214 elif self.isvector() and isinstance(other,numpy.matrix):
2215 if numpy.ravel(other).size == self.shape[0]: # Vector
2216 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2217 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2218 return numpy.asmatrix(numpy.array(other) * self.__C)
2220 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2221 elif self.isscalar() and isinstance(other,numpy.matrix):
2222 return other * self.__C
2223 elif self.isscalar() and isinstance(other,float):
2224 return other * self.__C
2225 elif self.isobject():
2226 return self.__C.__rmul__(other)
2228 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2231 "x.__len__() <==> len(x)"
2232 return self.shape[0]
2234 # ==============================================================================
2235 class Observer2Func(object):
2237     Creation of an observer function from its source text
# Wraps user-provided code text (self.__corps) and exposes it as a callable;
# the bodies of func/getfunc are partly elided in this excerpt.
2239 def __init__(self, corps=""):
2240 self.__corps = corps
2241 def func(self,var,info):
2242 "Observation function"
2245 "Return the function pointer held by the object"
2248 # ==============================================================================
2249 class CaseLogger(object):
2251     Keeps a record of the commands used to create a case
2253 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2254 self.__name = str(__name)
2255 self.__objname = str(__objname)
# Serie of registered commands, appended by register() and replayed by dump().
2256 self.__logSerie = []
2257 self.__switchoff = False
# Built-in output formatters (viewers) keyed by format name; the enclosing
# "self.__viewers = {" / "self.__loaders = {" lines are elided in this excerpt.
2259 "TUI" :Interfaces._TUIViewer,
2260 "SCD" :Interfaces._SCDViewer,
2261 "YACS":Interfaces._YACSViewer,
2262 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2263 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2264 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
2267 "TUI" :Interfaces._TUIViewer,
2268 "COM" :Interfaces._COMViewer,
# User-supplied viewers/loaders extend or override the built-in ones.
2270 if __addViewers is not None:
2271 self.__viewers.update(dict(__addViewers))
2272 if __addLoaders is not None:
2273 self.__loaders.update(dict(__addLoaders))
2275 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2276 "Record one individual command"
2277 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
# "self" is dropped from the recorded keys: it is not a user argument.
2278 if "self" in __keys: __keys.remove("self")
2279 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2281 self.__switchoff = True
2283 self.__switchoff = False
2285 def dump(self, __filename=None, __format="TUI", __upa=""):
2286 "Normalized restitution of the commands"
2287 if __format in self.__viewers:
2288 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2290 raise ValueError("Dumping as \"%s\" is not available"%__format)
2291 return __formater.dump(__filename, __upa)
2293 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2294 "Normalized loading of the commands"
2295 if __format in self.__loaders:
2296 __formater = self.__loaders[__format]()
2298 raise ValueError("Loading as \"%s\" is not available"%__format)
2299 return __formater.load(__filename, __content, __object)
2301 # ==============================================================================
2304 _extraArguments = None,
2305 _sFunction = lambda x: x,
# NOTE(review): the enclosing "def" header of this multi-evaluation function is
# elided in this excerpt (original lines 2302-2303); code bytes are unchanged.
2310 For an ordered list of input vectors, returns the corresponding
2311 list of values of the function given as argument
2313 # Initial checks and definitions
2314 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2315 if not PlatformInfo.isIterable( __xserie ):
2316 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: any None or non-positive request falls back to the default
# (set in an elided branch), otherwise the requested integer is used.
2318 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2321 __mpWorkers = int(_mpWorkers)
2323 import multiprocessing
# Parallel path: evaluate the prepared _jobs over a multiprocessing pool.
2334 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2335 with multiprocessing.Pool(__mpWorkers) as pool:
2336 __multiHX = pool.map( _sFunction, _jobs )
2339 # logging.debug("MULTF Internal multiprocessing calculation end")
2341 # logging.debug("MULTF Internal monoprocessing calculation begin")
# Sequential path: extra arguments may be absent, positional, or keyword.
2343 if _extraArguments is None:
2344 for __xvalue in __xserie:
2345 __multiHX.append( _sFunction( __xvalue ) )
2346 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2347 for __xvalue in __xserie:
2348 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2349 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2350 for __xvalue in __xserie:
2351 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2353 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2354 # logging.debug("MULTF Internal monoprocessing calculation end")
2356 # logging.debug("MULTF Internal multifonction calculations end")
2359 # ==============================================================================
# Self-diagnostic banner when the module is executed directly.
2360 if __name__ == "__main__":
2361 print('\n AUTODIAGNOSTIC\n')