1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2022 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# General management class for a cache of computations: stores tuples of
# (previous point, previous value, norm, operator name) and lets callers
# retrieve a stored result when a new point is close enough to a cached one.
# NOTE(review): this chunk is a numbered listing with missing lines (e.g. the
# "def __init__" line around originals 43-44, the bodies of the if/elif tests
# in wasCalculatedIn, the "def" lines of disable/enable). Code lines below
# are kept byte-identical; only comments and docstring translations changed.
42     Classe générale de gestion d'un cache de calculs
# Constructor keyword defaults (the "def __init__" line itself is not visible
# in this view — TODO confirm against the full file):
# - toleranceInRedundancy: relative tolerance used to declare two points
#   redundant (default 1.e-18)
# - lenghtOfRedundancy: maximum cache length, -1 meaning "auto-size at the
#   first store" (the "lenght" spelling is kept — it is part of the API)
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49         Les caractéristiques de tolérance peuvent être modifiées à la création.
51         self.__tolerBP = float(toleranceInRedundancy)
52         self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR keeps the initial cache length so a later enable() can restore
# it (see the last lines of this class).
53         self.__initlnOR = self.__lenghtOR
63     def wasCalculatedIn(self, xValue, oName="" ):
64         "Checks the existence of a computation matching the value"
# Scan the cache backwards (most recent first), limited to __lenghtOR
# entries; a stored entry is (point, value, norm, operator name).
68             for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69                 if not hasattr(xValue, 'size'):
71                 elif (str(oName) != self.__listOPCV[i][3]):
73                 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component pre-test before the full norm comparison below.
75                 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
77                 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
# Cache hit: reuse the stored operator value.
79                     __HxV = self.__listOPCV[i][1]
83     def storeValueInX(self, xValue, HxValue, oName="" ):
84         "Stores for an operator o a computation Hx matching the value x"
# First store ever: auto-size the cache from the state size (capped at 50).
85         if self.__lenghtOR < 0:
86             self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87             self.__initlnOR = self.__lenghtOR
88             self.__seenNames.append(str(oName))
89         if str(oName) not in self.__seenNames: # Extends the list if new
90             self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91             self.__initlnOR += self.__lenghtOR
92             self.__seenNames.append(str(oName))
# FIFO eviction when the cache exceeds its allowed length.
93         while len(self.__listOPCV) > self.__lenghtOR:
94             self.__listOPCV.pop(0)
95         self.__listOPCV.append( (
96             copy.copy(numpy.ravel(xValue)), # 0 Previous point
97             copy.copy(HxValue), # 1 Previous value
98             numpy.linalg.norm(xValue), # 2 Norm
99             str(oName), # 3 Operator name
# disable()/enable() pair (their "def" lines are missing from this view):
# presumably disable() turns caching off after saving the current length,
# and enable() restores the initial length — TODO confirm in the full file.
104         self.__initlnOR = self.__lenghtOR
106         self.__enabled = False
110         self.__lenghtOR = self.__initlnOR
111         self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# General interface class for a simple operator: wraps either a Python
# (multi-)function or a matrix, counts calls per-instance and per-class,
# and goes through the class-shared cache Operator.CM to avoid redundant
# evaluations.
# NOTE(review): this chunk is a numbered listing with missing lines
# (docstring delimiters, several else/try branches, the class-level
# NbCalls* counters, the "def" line of shape); code lines below are kept
# byte-identical — only comments and visible French comments/one-line
# docstrings are translated.
116     Classe générale d'interface de type opérateur simple
# Constructor keyword defaults (the "def __init__" line and the
# fromMethod/fromMatrix parameters around originals 125-126 are missing
# from this view):
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 reducingMemoryUse = False,
129 inputAsMultiFunction = False,
130 enableMultiProcess = False,
131 extraArguments = None,
134         On construit un objet de ce type en fournissant, à l'aide de l'un des
135         deux mots-clé, soit une fonction ou un multi-fonction python, soit une
138         - name : nom d'opérateur
139         - fromMethod : argument de type fonction Python
140         - fromMatrix : argument adapté au constructeur numpy.array/matrix
141         - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
142         - reducingMemoryUse : booléen forçant (ou pas) des calculs moins
144         - inputAsMultiFunction : booléen indiquant une fonction explicitement
145           définie (ou pas) en multi-fonction
146         - extraArguments : arguments supplémentaires passés à la fonction de
147           base et ses dérivées (tuple ou dictionnaire)
149         self.__name = str(name)
# Per-instance call counters; class-level twins (Operator.NbCallsAsMatrix
# etc., used in nbcalls below) are declared on lines missing from this view.
150         self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
151         self.__reduceM = bool( reducingMemoryUse )
152         self.__avoidRC = bool( avoidingRedundancy )
153         self.__inputAsMF = bool( inputAsMultiFunction )
154         self.__mpEnabled = bool( enableMultiProcess )
155         self.__extraArgs = extraArguments
# Method given directly as a multi-function: used as-is.
156         if fromMethod is not None and self.__inputAsMF:
157             self.__Method = fromMethod # logtimer(fromMethod)
159             self.__Type = "Method"
# Mono-function: wrapped into MultiFonction so all call sites can use the
# multi-valued (series) calling convention.
160         elif fromMethod is not None and not self.__inputAsMF:
161             self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
163             self.__Type = "Method"
# Matrix operator: accepts a string form converted via PlatformInfo.
164         elif fromMatrix is not None:
166             if isinstance(fromMatrix, str):
167                 fromMatrix = PlatformInfo.strmatrix2liststr( fromMatrix )
168             self.__Matrix = numpy.asarray( fromMatrix, dtype=float )
169             self.__Type = "Matrix"
# Toggle redundancy caching through the class-shared cache Operator.CM.
175     def disableAvoidingRedundancy(self):
177         Operator.CM.disable()
179     def enableAvoidingRedundancy(self):
# NOTE(review): this visible branch of enableAvoidingRedundancy calls
# CM.disable(); presumably it is the "else" branch taken when __avoidRC is
# False — the surrounding lines are missing, confirm against the full file.
184             Operator.CM.disable()
190     def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
192         Permet de restituer le résultat de l'application de l'opérateur à une
193         série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
194         argument devant a priori être du bon type.
196         - les arguments par série sont :
197         - xValue : argument adapté pour appliquer l'opérateur
198         - HValue : valeur précalculée de l'opérateur en ce point
199         - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
# When precomputed values HValue are supplied, store them in the cache
# instead of evaluating the operator.
206         if HValue is not None:
210             PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
212             if _HValue is not None:
213                 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
215                 for i in range(len(_HValue)):
216                     _HxValue.append( _HValue[i] )
218                         Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise: look each point up in the cache, evaluate matrix products
# immediately, and defer function evaluations into _xserie for one batched
# multi-function call below.
223             for i, xv in enumerate(_xValue):
225                     __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
227                     __alreadyCalculated = False
229                 if __alreadyCalculated:
230                     self.__addOneCacheCall()
233                     if self.__Matrix is not None:
234                         self.__addOneMatrixCall()
235                         _hv = self.__Matrix @ numpy.ravel(xv)
237                         self.__addOneMethodCall()
241                 _HxValue.append( _hv )
# Batched evaluation of all deferred points through the multi-function.
243             if len(_xserie)>0 and self.__Matrix is None:
244                 if self.__extraArgs is None:
245                     _hserie = self.__Method( _xserie ) # MF computation
247                     _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
248                 if not hasattr(_hserie, "pop"):
249                     raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
255                         Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the series of results as columns of a single 2D array.
257         if returnSerieAsArrayMatrix:
258             _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
260         if argsAsSerie: return _HxValue
261         else: return _HxValue[-1]
263     def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
265         Permet de restituer le résultat de l'application de l'opérateur à des
266         paires (xValue, uValue). Cette méthode se contente d'appliquer, son
267         argument devant a priori être du bon type. Si la uValue est None,
268         on suppose que l'opérateur ne s'applique qu'à xValue.
270         - paires : les arguments par paire sont :
271         - xValue : argument X adapté pour appliquer l'opérateur
272         - uValue : argument U adapté pour appliquer l'opérateur
273         - argsAsSerie : indique si l'argument est une mono ou multi-valeur
275         if argsAsSerie: _xuValue = paires
276         else: _xuValue = (paires,)
277         PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix case: the control value U is ignored, the matrix applies to X only.
279         if self.__Matrix is not None:
281             for paire in _xuValue:
282                 _xValue, _uValue = paire
283                 self.__addOneMatrixCall()
284                 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function case: pass the (X, U) pair when U is given, X alone otherwise.
287             for paire in _xuValue:
288                 _xValue, _uValue = paire
289                 if _uValue is not None:
290                     _xuArgs.append( paire )
292                     _xuArgs.append( _xValue )
293             self.__addOneMethodCall( len(_xuArgs) )
294             if self.__extraArgs is None:
295                 _HxValue = self.__Method( _xuArgs ) # MF computation
297                 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
299         if returnSerieAsArrayMatrix:
300             _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
302         if argsAsSerie: return _HxValue
303         else: return _HxValue[-1]
305     def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
307         Permet de restituer le résultat de l'application de l'opérateur à une
308         série d'arguments xValue, sachant que l'opérateur est valable en
309         xNominal. Cette méthode se contente d'appliquer, son argument devant a
310         priori être du bon type. Si l'opérateur est linéaire car c'est une
311         matrice, alors il est valable en tout point nominal et xNominal peut
312         être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
313         permet d'indiquer que l'argument est multi-paires.
315         - paires : les arguments par paire sont :
316         - xNominal : série d'arguments permettant de donner le point où
317           l'opérateur est construit pour être ensuite appliqué
318         - xValue : série d'arguments adaptés pour appliquer l'opérateur
319         - argsAsSerie : indique si l'argument est une mono ou multi-valeur
321         if argsAsSerie: _nxValue = paires
322         else: _nxValue = (paires,)
323         PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear (matrix) operator: valid at any nominal point, xNominal unused.
325         if self.__Matrix is not None:
327             for paire in _nxValue:
328                 _xNominal, _xValue = paire
329                 self.__addOneMatrixCall()
330                 _HxValue.append( self.__Matrix @ numpy.ravel(_xValue) )
# Function operator: forward the (xNominal, xValue) pairs in one MF call.
332             self.__addOneMethodCall( len(_nxValue) )
333             if self.__extraArgs is None:
334                 _HxValue = self.__Method( _nxValue ) # MF computation
336                 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
338         if returnSerieAsArrayMatrix:
339             _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
341         if argsAsSerie: return _HxValue
342         else: return _HxValue[-1]
344     def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
346         Permet de renvoyer l'opérateur sous la forme d'une matrice
348         if self.__Matrix is not None:
349             self.__addOneMatrixCall()
350             mValue = [self.__Matrix,]
# A functional operator needs an operating point to be rendered as a
# matrix; the string sentinel "UnknownVoidValue" flags "no point given".
351         elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
354                 self.__addOneMethodCall( len(ValueForMethodForm) )
355                 for _vfmf in ValueForMethodForm:
356                     mValue.append( self.__Method(((_vfmf, None),)) )
358                 self.__addOneMethodCall()
359                 mValue = self.__Method(((ValueForMethodForm, None),))
361             raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
363         if argsAsSerie: return mValue
364         else: return mValue[-1]
# shape accessor (its "def" line is among the lines missing from this view):
# returns the numpy shape only when the operator is held as a matrix.
368         Renvoie la taille sous forme numpy si l'opérateur est disponible sous
369         la forme d'une matrice
371         if self.__Matrix is not None:
372             return self.__Matrix.shape
374             raise ValueError("Matrix form of the operator is not available, nor the shape")
376     def nbcalls(self, which=None):
378         Renvoie les nombres d'évaluations de l'opérateur
# Tuple layout: local (total, matrix, method, cached) then global
# (total, matrix, method, cached); "which" selects one entry by index.
381             self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
382             self.__NbCallsAsMatrix,
383             self.__NbCallsAsMethod,
384             self.__NbCallsOfCached,
385             Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
386             Operator.NbCallsAsMatrix,
387             Operator.NbCallsAsMethod,
388             Operator.NbCallsOfCached,
390         if which is None: return __nbcalls
391         else: return __nbcalls[which]
393     def __addOneMatrixCall(self):
394         "Counts one call"
395         self.__NbCallsAsMatrix += 1 # Local count
396         Operator.NbCallsAsMatrix += 1 # Global count
398     def __addOneMethodCall(self, nb = 1):
399         "Counts one call"
400         self.__NbCallsAsMethod += nb # Local count
401         Operator.NbCallsAsMethod += nb # Global count
403     def __addOneCacheCall(self):
404         "Counts one call"
405         self.__NbCallsOfCached += 1 # Local count
406         Operator.NbCallsOfCached += 1 # Global count
408 # ==============================================================================
409 class FullOperator(object):
# General interface class for a full operator (Direct, Tangent-Linear,
# Adjoint): builds the three Operator instances from either a matrix, one
# direct function (derivatives by finite differences), three explicit
# functions, or a user script, and stores them in the self.__FO dictionary.
# NOTE(review): this chunk is a numbered listing with missing lines (the
# "def __init__" line and the asMatrix/appliedInX/scheduledBy/toBeChecked
# parameters, the __FO/__Parameters initialisation, several else branches,
# the "def __repr__"/"def __str__" lines); code lines below are kept
# byte-identical — only comments and comment translations are changed.
411     Classe générale d'interface de type opérateur complet
412     (Direct, Linéaire Tangent, Adjoint)
# Visible constructor keyword defaults:
415 name = "GenericFullOperator",
417 asOneFunction = None, # 1 Function
418 asThreeFunctions = None, # 3 Functions in a dictionary
419 asScript = None, # 1 or 3 Function(s) by script
420 asDict = None, # Parameters
422 extraArguments = None,
423 performancePrf = None,
424 inputAsMF = False,# Function(s) as Multi-Functions
429         self.__name = str(name)
430         self.__check = bool(toBeChecked)
431         self.__extraArgs = extraArguments
# Normalise the multiprocessing switches inside __Parameters.
436         if (asDict is not None) and isinstance(asDict, dict):
437             __Parameters.update( asDict )
438         # Priority to EnableMultiProcessingInDerivatives=True
439         if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
440             __Parameters["EnableMultiProcessingInDerivatives"] = True
441             __Parameters["EnableMultiProcessingInEvaluation"] = False
442         if "EnableMultiProcessingInDerivatives" not in __Parameters:
443             __Parameters["EnableMultiProcessingInDerivatives"] = False
444         if __Parameters["EnableMultiProcessingInDerivatives"]:
445             __Parameters["EnableMultiProcessingInEvaluation"] = False
446         if "EnableMultiProcessingInEvaluation" not in __Parameters:
447             __Parameters["EnableMultiProcessingInEvaluation"] = False
448         if "withIncrement" in __Parameters: # Temporary
449             __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
450         # The default is equivalent to "ReducedOverallRequirements"
451         __reduceM, __avoidRC = True, True
452         if performancePrf is not None:
453             if performancePrf == "ReducedAmountOfCalculation":
454                 __reduceM, __avoidRC = False, True
455             elif performancePrf == "ReducedMemoryFootprint":
456                 __reduceM, __avoidRC = True, False
457             elif performancePrf == "NoSavings":
458                 __reduceM, __avoidRC = False, False
# Script input: import the matrix or the function(s) from the user script.
460         if asScript is not None:
461             __Matrix, __Function = None, None
463                 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
465                 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
466                 __Function.update({"useApproximatedDerivatives":True})
467                 __Function.update(__Parameters)
468             elif asThreeFunctions:
470                     "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
471                     "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
472                     "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
474                 __Function.update(__Parameters)
# Direct input: one function (derivatives will be approximated)...
477             if asOneFunction is not None:
478                 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
479                     if asOneFunction["Direct"] is not None:
480                         __Function = asOneFunction
482                         raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
484                     __Function = { "Direct":asOneFunction }
485                 __Function.update({"useApproximatedDerivatives":True})
486                 __Function.update(__Parameters)
# ...or three functions (Tangent/Adjoint mandatory, Direct optional —
# Tangent is used as Direct when Direct is absent, see line 500).
487             elif asThreeFunctions is not None:
488                 if isinstance(asThreeFunctions, dict) and \
489                         ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
490                         ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
491                         (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
492                     __Function = asThreeFunctions
493                 elif isinstance(asThreeFunctions, dict) and \
494                         ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
495                     __Function = asThreeFunctions
496                     __Function.update({"useApproximatedDerivatives":True})
498                     raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
499                 if "Direct" not in asThreeFunctions:
500                     __Function["Direct"] = asThreeFunctions["Tangent"]
501                 __Function.update(__Parameters)
505         # if sys.version_info[0] < 3 and isinstance(__Function, dict):
506         #     for k in ("Direct", "Tangent", "Adjoint"):
507         #         if k in __Function and hasattr(__Function[k],"__class__"):
508         #             if type(__Function[k]) is type(self.__init__):
509         #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalise appliedInX into a dict keyed by "HXb".
511         if appliedInX is not None and isinstance(appliedInX, dict):
512             __appliedInX = appliedInX
513         elif appliedInX is not None:
514             __appliedInX = {"HXb":appliedInX}
518         if scheduledBy is not None:
519             self.__T = scheduledBy
# Case 1: a Direct function with approximated derivatives — build Tangent
# and Adjoint by finite differences through NumericObjects.FDApproximation.
521         if isinstance(__Function, dict) and \
522                 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
523                 ("Direct" in __Function) and (__Function["Direct"] is not None):
524             if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
525             if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
526             if "withdX" not in __Function: __Function["withdX"] = None
527             if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
528             if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
529             if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
530             if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
531             if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
532             if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
533             from daCore import NumericObjects
534             FDA = NumericObjects.FDApproximation(
536                 Function = __Function["Direct"],
537                 centeredDF = __Function["CenteredFiniteDifference"],
538                 increment = __Function["DifferentialIncrement"],
539                 dX = __Function["withdX"],
540                 extraArguments = self.__extraArgs,
541                 reducingMemoryUse = __Function["withReducingMemoryUse"],
542                 avoidingRedundancy = __Function["withAvoidingRedundancy"],
543                 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
544                 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
545                 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
546                 mpWorkers = __Function["NumberOfProcesses"],
547                 mfEnabled = __Function["withmfEnabled"],
549             self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
550             self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
551             self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: the three functions are explicitly given.
552         elif isinstance(__Function, dict) and \
553                 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
554                 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
555             self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
556             self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
557             self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: a matrix — Tangent is the matrix itself, Adjoint its transpose.
558         elif asMatrix is not None:
559             if isinstance(__Matrix, str):
560                 __Matrix = PlatformInfo.strmatrix2liststr( __Matrix )
561             __matrice = numpy.asarray( __Matrix, dtype=float )
562             self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
563             self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
564             self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
567             raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the operator value at the operating point, as column matrices.
# NOTE(review): numpy.float is deprecated since NumPy 1.20 and removed in
# NumPy 1.24 — the three numpy.matrix(..., numpy.float) calls below fail on
# recent NumPy and should eventually use the builtin "float" instead.
569         if __appliedInX is not None:
570             self.__FO["AppliedInX"] = {}
571             for key in list(__appliedInX.keys()):
572                 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
573                     # For the case where we have a true matrix
574                     self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
575                 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
576                     # For the case where we have a vector represented as a 2-dimension array
577                     self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
579                     self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
581             self.__FO["AppliedInX"] = None
# __repr__/__str__ delegate to the internal __FO dictionary (their "def"
# lines are missing from this view).
587         "x.__repr__() <==> repr(x)"
588         return repr(self.__FO)
591         "x.__str__() <==> str(x)"
592         return str(self.__FO)
594 # ==============================================================================
595 class Algorithm(object):
597 Classe générale d'interface de type algorithme
599 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
600 d'assimilation, en fournissant un container (dictionnaire) de variables
601 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
603 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
605 def __init__(self, name):
607 L'initialisation présente permet de fabriquer des variables de stockage
608 disponibles de manière générique dans les algorithmes élémentaires. Ces
609 variables de stockage sont ensuite conservées dans un dictionnaire
610 interne à l'objet, mais auquel on accède par la méthode "get".
612 Les variables prévues sont :
613 - APosterioriCorrelations : matrice de corrélations de la matrice A
614 - APosterioriCovariance : matrice de covariances a posteriori : A
615 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
616 - APosterioriVariances : vecteur des variances de la matrice A
617 - Analysis : vecteur d'analyse : Xa
618 - BMA : Background moins Analysis : Xa - Xb
619 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
620 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
621 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
622 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
623 - CostFunctionJo : partie observations de la fonction-coût : Jo
624 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
625 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
626 - CurrentOptimum : état optimal courant lors d'itérations
627 - CurrentState : état courant lors d'itérations
628 - CurrentStepNumber : numéro courant de pas de mesure dans les algorithmes temporels
629 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
630 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
631 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
632 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
633 - Innovation : l'innovation : d = Y - H(X)
634 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
635 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
636 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
637 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
638 - KalmanGainAtOptimum : gain de Kalman à l'optimum
639 - MahalanobisConsistency : indicateur de consistance des covariances
640 - OMA : Observation moins Analyse : Y - Xa
641 - OMB : Observation moins Background : Y - Xb
642 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
643 - ForecastState : état prédit courant lors d'itérations
644 - Residu : dans le cas des algorithmes de vérification
645 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
646 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
647 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
648 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
649 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
650 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
651 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
652 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
653 On peut rajouter des variables à stocker dans l'initialisation de
654 l'algorithme élémentaire qui va hériter de cette classe
656 logging.debug("%s Initialisation", str(name))
657 self._m = PlatformInfo.SystemUsage()
659 self._name = str( name )
660 self._parameters = {"StoreSupplementaryCalculations":[]}
661 self.__internal_state = {}
662 self.__required_parameters = {}
663 self.__required_inputs = {
664 "RequiredInputValues":{"mandatory":(), "optional":()},
665 "ClassificationTags":[],
667 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
668 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
669 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
671 self.StoredVariables = {}
672 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
673 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
674 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
675 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
676 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
677 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
678 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
679 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
680 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
681 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
682 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
683 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
684 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
685 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
686 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
687 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
688 self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
689 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
690 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
691 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
692 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
693 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
694 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
695 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
696 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
697 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
698 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
699 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
700 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
701 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
702 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
703 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
704 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
705 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
706 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
707 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
708 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
709 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
710 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
711 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
712 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
713 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
714 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
716 for k in self.StoredVariables:
717 self.__canonical_stored_name[k.lower()] = k
719 for k, v in self.__variable_names_not_public.items():
720 self.__canonical_parameter_name[k.lower()] = k
721 self.__canonical_parameter_name["algorithm"] = "Algorithm"
722 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
    # Pre-computation: update internal parameters, check presence/size of the
    # input vectors, covariances and operators, normalize bounds, check the
    # forced initialization point, and set optimizer verbosity from logging.
    # NOTE(review): several original structural lines ("if argument is None:",
    # "else:", etc.) are absent from this extract; visible code is kept
    # byte-identical — verify against the upstream file before running.
    def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
        self._getTimeState(reset=True)
        # Update of the internal parameters with the content of Parameters,
        # reusing the default values for all those not defined
        self.__setParameters(Parameters, reset=True)
        for k, v in self.__variable_names_not_public.items():
            if k not in self._parameters: self.__setParameters( {k:v} )
        # Corrections and complements of the vectors
        def __test_vvalue(argument, variable, argname, symbol=None):
            # Log (or reject) the presence/absence of an input vector.
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
                logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        __test_vvalue( U, "U", "Control" )
        # Corrections and complements of the covariances
        def __test_cvalue(argument, variable, argname, symbol=None):
            # Log (or reject) the presence/absence of an error covariance matrix.
            if symbol is None: symbol = variable
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
                logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( Q, "Q", "Evolution" )
        # Corrections and complements of the operators
        def __test_ovalue(argument, variable, argname, symbol=None):
            # Log (or reject) the presence/absence of an operator; an empty
            # dict counts as "not set".
            if symbol is None: symbol = variable
            if argument is None or (isinstance(argument,dict) and len(argument)==0):
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
                    logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
                logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
        __test_ovalue( HO, "HO", "Observation", "H" )
        __test_ovalue( EM, "EM", "Evolution", "M" )
        __test_ovalue( CM, "CM", "Control Model", "C" )
        # Corrections and complements of the bounds
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Bounds taken into account"%(self._name,))
            self._parameters["Bounds"] = None
        if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
            logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
            # Caution: unlike Bounds, no default to None, otherwise it is
            # impossible to be without bounds
        # Corrections and complements of the initialization in X
        if "InitializationPoint" in self._parameters:
                if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
                    if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
                        raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
                            %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
                    # Obtained by typecast : numpy.ravel(self._parameters["InitializationPoint"])
                    self._parameters["InitializationPoint"] = numpy.ravel(Xb)
                if self._parameters["InitializationPoint"] is None:
                    raise ValueError("Forced initial point can not be set without any given Background or required value")
        # Workaround for a TNC bug on the return of the Minimum
        if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
            self.setParameterValue("StoreInternalVariables",True)
        # Verbosity and logging
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            self._parameters["optmessages"] = 0
    # Post-computation: derive a posteriori variances / standard deviations /
    # correlations from each stored APosterioriCovariance matrix when they
    # were requested, then log operator-call counts and run timing.
    def _post_run(self,_oH=None):
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by the inverse standard deviations
                    # to get the correlation matrix.
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
        logging.debug("%s Terminé", self._name)
        # NOTE(review): the final "return" of this method is absent from this
        # extract — verify against the upstream file.
860 def _toStore(self, key):
861 "True if in StoreSupplementaryCalculations, else False"
862 return key in self._parameters["StoreSupplementaryCalculations"]
    # Accessor for stored variables: one variable by (case-insensitive) key,
    # or the whole dictionary when no key is given.
    # NOTE(review): the "if key is not None:"/"else:" guard lines and the
    # docstring delimiters are absent from this extract.
    def get(self, key=None):
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
    # Case-insensitive membership test on the stored-variable names.
    # NOTE(review): the "return False"/"else:" lines of the guard are absent
    # from this extract.
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        if key is None or key.lower() not in self.__canonical_stored_name:
            return self.__canonical_stored_name[key.lower()] in self.StoredVariables
        # NOTE(review): the "def keys(self):" header line (and the fallback
        # branch for the no-StoredVariables case) is absent from this extract.
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        # NOTE(review): the "def pop(self, k, d):" header and the surrounding
        # try/except scaffolding are absent from this extract.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
            return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
    # Abstract entry point: concrete algorithms must override run() with the
    # elementary assimilation computation.
    # NOTE(review): the docstring delimiters are absent from this extract.
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        Doit implémenter l'opération élémentaire de calcul d'assimilation sous
        sa forme mathématique la plus naturelle possible.
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    # Declare a required algorithm parameter with its default value, typecast,
    # bounds and allowed-value lists, and register its canonical name.
    # NOTE(review): the "if name is None:" guard and most of the dictionary
    # entries ("default", "message", ...) are absent from this extract.
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
        Permet de définir dans l'algorithme des paramètres requis et leurs
        caractéristiques par défaut.
            raise ValueError("A name is mandatory to define a required parameter.")
        self.__required_parameters[name] = {
            "typecast" : typecast,
        self.__canonical_parameter_name[name.lower()] = name
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
    # Return either the sorted list of required-parameter names (noDetails)
    # or the full definition dictionary.
    # NOTE(review): the "if noDetails:"/"else:" lines are absent from this
    # extract.
    def getRequiredParameters(self, noDetails=True):
        Renvoie la liste des noms de paramètres requis ou directement le
        dictionnaire des paramètres requis.
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters
    # Validate and return the value of a required parameter: apply the
    # typecast, check min/max bounds and allowed-value lists; falls back on
    # the declared default when no value is given.
    # NOTE(review): several structural lines ("__val = None", "else:", "try:",
    # the final "return __val") are absent from this extract.
    def setParameterValue(self, name=None, value=None):
        Renvoie la valeur d'un paramètre requis de manière contrôlée
        __k = self.__canonical_parameter_name[name.lower()]
        default = self.__required_parameters[__k]["default"]
        typecast = self.__required_parameters[__k]["typecast"]
        minval = self.__required_parameters[__k]["minval"]
        maxval = self.__required_parameters[__k]["maxval"]
        listval = self.__required_parameters[__k]["listval"]
        listadv = self.__required_parameters[__k]["listadv"]
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
                    __val = typecast( value )
                    raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
        if listval is not None or listadv is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                    if listval is not None and v in listval: continue
                    elif listadv is not None and v in listadv: continue
                        raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
            elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
                raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
    # Record which calculation inputs are mandatory and which are optional;
    # these sets drive the presence checks done in _pre_run.
    # NOTE(review): the docstring delimiters are absent from this extract.
    def requireInputArguments(self, mandatory=(), optional=()):
        Permet d'imposer des arguments de calcul requis en entrée.
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
    # Return the (mandatory, optional) input-argument name tuples.
    # NOTE(review): the docstring delimiters are absent from this extract.
    def getInputArguments(self):
        Permet d'obtenir les listes des arguments de calcul requis en entrée.
        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
    # Append classification tags and return the (possibly extended) current
    # list — calling with no argument only reads the list.
    # NOTE(review): the docstring delimiters are absent from this extract.
    def setAttributes(self, tags=()):
        Permet d'adjoindre des attributs comme les tags de classification.
        Renvoie la liste actuelle dans tous les cas.
        self.__required_inputs["ClassificationTags"].extend( tags )
        return self.__required_inputs["ClassificationTags"]
    # Store the received parameters in the internal dictionary, resolving
    # case-insensitive user keys to their canonical names and (re)validating
    # each required parameter through setParameterValue.
    # NOTE(review): mutable default argument "fromDico={}" is upstream style
    # (it is only read here); the "elif reset:"/"else:" branch lines are
    # absent from this extract.
    def __setParameters(self, fromDico={}, reset=False):
        Permet de stocker les paramètres reçus dans le dictionnaire interne.
        self._parameters.update( fromDico )
        __inverse_fromDico_keys = {}
        for k in fromDico.keys():
            if k.lower() in self.__canonical_parameter_name:
                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
        for k in self.__required_parameters.keys():
            if k in __canonic_fromDico_keys:
                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
                self._parameters[k] = self.setParameterValue(k)
            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
    # Store named variables making up the internal state, either one
    # key/value pair or a whole dictionary; reset empties the state first.
    # NOTE(review): the docstring delimiters are absent from this extract;
    # mutable default "fromDico={}" is upstream style (read-only here).
    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
        Permet de stocker des variables nommées constituant l'état interne
        if reset: # Empty the dictionary beforehand
            self.__internal_state = {}
        if key is not None and value is not None:
            self.__internal_state[key] = value
        self.__internal_state.update( dict(fromDico) )
    # Return one internal-state variable by key, or the whole state dict.
    # NOTE(review): the docstring delimiters and the "else:" line are absent
    # from this extract.
    def _getInternalState(self, key=None):
        Restitue un état interne sous la forme d'un dictionnaire de variables nommées
        if key is not None and key in self.__internal_state:
            return self.__internal_state[key]
            return self.__internal_state
    # Initialize (reset=True) or return the (cpu, elapsed) run times in
    # seconds, measured with time.process_time / time.perf_counter.
    # NOTE(review): the "if reset:"/"else:" lines are absent from this
    # extract — only the two branch bodies are visible below.
    def _getTimeState(self, reset=False):
        Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
            self.__initial_cpu_time = time.process_time()
            self.__initial_elapsed_time = time.perf_counter()
            self.__cpu_time = time.process_time() - self.__initial_cpu_time
            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
            return self.__cpu_time, self.__elapsed_time
    # Stopping criterion on the MaximumCpuTime / MaximumElapsedTime
    # parameters; computes the stop decision __SC and a reason string __SR.
    def _StopOnTimeLimit(self, X=None, withReason=False):
        "Stop criteria on time limit: True/False [+ Reason]"
        c, e = self._getTimeState()
        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
            __SC, __SR = False, ""
        # NOTE(review): the final "return" lines (with or without the reason,
        # depending on withReason) are absent from this extract.
1074 # ==============================================================================
# Lightweight stand-in that mimics "Algorithm" from the storage point of view
# only, with none of the advanced checking; the methods reproduced here behave
# exactly like those of the "Algorithm" class.
# NOTE(review): docstring delimiters and a few guard lines ("if key is not
# None:"/"else:") are absent from this extract; code kept byte-identical.
class PartialAlgorithm(object):
    Classe pour mimer "Algorithm" du point de vue stockage, mais sans aucune
    action avancée comme la vérification . Pour les méthodes reprises ici,
    le fonctionnement est identique à celles de la classe "Algorithm".
    def __init__(self, name):
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.StoredVariables = {}
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
        self.StoredVariables["CurrentStepNumber"] = Persistence.OneIndex(name = "CurrentStepNumber")
        # Map lower-cased names to their canonical stored-variable names.
        self.__canonical_stored_name = {}
        for k in self.StoredVariables:
            self.__canonical_stored_name[k.lower()] = k
    def _toStore(self, key):
        "True if in StoreSupplementaryCalculations, else False"
        return key in self._parameters["StoreSupplementaryCalculations"]
    # Accessor: one stored variable by (case-insensitive) key, or all of them.
    def get(self, key=None):
        Renvoie l'une des variables stockées identifiée par la clé, ou le
        dictionnaire de l'ensemble des variables disponibles en l'absence de
        clé. Ce sont directement les variables sous forme objet qui sont
        renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
        des classes de persistance.
            return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
            return self.StoredVariables
1114 # ==============================================================================
# General action-interface class for an algorithm and its parameters.
# NOTE(review): most of the __init__ signature and several of its assignments
# (e.g. "self.__A = None", "self.__P = {}") are absent from this extract;
# visible lines are kept byte-identical.
class AlgorithmAndParameters(object):
    Classe générale d'interface d'action pour l'algorithme et ses paramètres
                 name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        self.updateParameters( asDict, asScript )
        # An algorithm name can come from the explicit argument or a script.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
            __Algo = asAlgorithm
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        self.__setAlgorithm( self.__A )
        self.__variable_names_not_public = {"nextStep":False} # Duplicated in Algorithm
    # Update the internal parameter dictionary, either from a dict or by
    # reading "Parameters" from a script file.
    # NOTE(review): the remaining signature lines (asDict/asScript arguments)
    # and the "else: __Dict = asDict" branch are absent from this extract.
    def updateParameters(self,
        "Mise a jour des parametres"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    # Launch the assimilation computation with the Python scheme: unpack the
    # calculation objects from the input dictionary (unwrapping via getO()
    # when available), validate shapes, then run the algorithm.
    # NOTE(review): most of the run(...) keyword arguments and the closing
    # parenthesis are absent from this extract.
    def executePythonScheme(self, asDictAO = None):
        "Permet de lancer le calcul d'assimilation"
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        self.__shape_validate()
        self.__algorithm.run(
            Parameters = self.__P,
    # Launch the assimilation computation through a YACS XML scheme inside a
    # SALOME environment: load the schema, validate it, and execute it.
    # NOTE(review): several lines ("import pilot", "import loader", "try:",
    # "except Exception:", "e.RunW(p)", the final return) are absent from
    # this extract; visible code kept byte-identical.
    def executeYACSScheme(self, FileName=None):
        "Permet de lancer le calcul d'assimilation"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
            __file = os.path.abspath(FileName)
            logging.debug("The YACS file name is \"%s\"."%__file)
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        import SALOMERuntime
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
            catalogAd = r.loadCatalog("proc", __file)
            r.addCatalog(catalogAd)
            p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    # Lookup: a stored algorithm variable first, then a parameter; with no
    # match, the parameter dictionary minus the non-public variables.
    # NOTE(review): "allvariables = self.__P" aliases the internal dict, so
    # the pop() below mutates the stored parameters; a copy (dict(self.__P))
    # would avoid that. The "else:" and final "return allvariables" lines are
    # absent from this extract, so the fix cannot be applied safely here.
    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            return self.__P[key]
            allvariables = self.__P
            for k in self.__variable_names_not_public: allvariables.pop(k, None)
1268 def pop(self, k, d):
1269 "Necessaire pour le pickling"
1270 return self.__algorithm.pop(k, d)
1272 def getAlgorithmRequiredParameters(self, noDetails=True):
1273 "Renvoie la liste des paramètres requis selon l'algorithme"
1274 return self.__algorithm.getRequiredParameters(noDetails)
1276 def getAlgorithmInputArguments(self):
1277 "Renvoie la liste des entrées requises selon l'algorithme"
1278 return self.__algorithm.getInputArguments()
1280 def getAlgorithmAttributes(self):
1281 "Renvoie la liste des attributs selon l'algorithme"
1282 return self.__algorithm.setAttributes()
    # Attach a data observer (hook __O with parameters __I and scheduler __S)
    # to the stored variable named __V; requires a chosen algorithm.
    # NOTE(review): the "else:" line and some setDataObserver keyword-argument
    # lines (scheduler/hook function) are absent from this extract.
    def setObserver(self, __V, __O, __I, __S):
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
            self.__algorithm.StoredVariables[ __V ].setDataObserver(
                HookParameters = __I,
    # Detach the data observer __O from the stored variable named __V;
    # requires a chosen algorithm.
    # NOTE(review): the "else:" line and the removeDataObserver argument and
    # closing lines are absent from this extract.
    def removeObserver(self, __V, __O, __A = False):
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
            return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
    # True when the stored variable named __V currently has a data observer.
    # NOTE(review): the two "return False" lines of the guards below are
    # absent from this extract.
    def hasObserver(self, __V):
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the "def keys(self):" header line is absent from this
        # extract. Returns variable + parameter names minus non-public ones.
        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
        for k in self.__variable_names_not_public:
            if k in __allvariables: __allvariables.remove(k)
        return __allvariables
1326 def __contains__(self, key=None):
1327 "D.__contains__(k) -> True if D has a key k, else False"
1328 return key in self.__algorithm or key in self.__P
        # NOTE(review): the "def __repr__(self):" header line is absent from
        # this extract.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # NOTE(review): the "def __str__(self):" header line is absent from
        # this extract.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    # Select the elementary algorithm: locate "<choice>.py" in a
    # "daAlgorithms" directory on sys.path, import it, check that it defines
    # ElementaryAlgorithm, and instantiate it.
    # NOTE(review): the "if choice is None:", "module_path = None" and "try:"
    # lines are absent from this extract; visible code kept byte-identical.
    def __setAlgorithm(self, choice = None ):
        Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
        d'assimilation. L'argument est un champ caractère se rapportant au nom
        d'un algorithme réalisant l'opération sur les arguments fixes.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        # Explicitly search for the complete file
        # ------------------------------------------
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        # Import the complete file as a module
        # ------------------------------------------
            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
                raise ImportError("this module does not define a valid elementary algorithm.")
            self.__algorithmName = str(choice)
            sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        # Instantiate an object of the elementary type from the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    # Validate the mutual size/shape consistency of all stored calculation
    # inputs (Xb, Y, U, B, R, Q and the HO/EM/CM operators) and of the bound
    # lists, raising TypeError/ValueError on any mismatch. A special case
    # converts an ensemble background into a persistent vector for the
    # "EnsembleBlue" algorithm.
    # NOTE(review): docstring delimiters, "#" separator lines and several
    # "else:" lines are absent from this extract; also, upstream-style
    # "__Xb_shape = min(__B_shape)" assigns an int where a tuple is used
    # elsewhere — kept as-is, verify against the upstream file.
    def __shape_validate(self):
        Validation de la correspondance correcte des tailles des variables et
        des matrices s'il y en a.
        # Shape extraction: each input may expose .size, a .shape tuple, or a
        # callable .shape() (covariance objects).
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        # Verification of the conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # EnsembleBlue accepts an ensemble background: convert it to a
            # persistent vector of members instead of failing.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background")
                for member in asPersistentVector:
                    self.__Xb.store( numpy.asarray(member, dtype=float) )
                __Xb_shape = min(__B_shape)
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        if ("StateBoundsForQuantiles" in self.__P) \
            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1499 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for a regulation action and its parameters.
    """
    def __init__(self,
                 name        = "GenericRegulation",
                 asAlgorithm = None,
                 asDict      = None,
                 asScript    = None,
                ):
        """
        Build the parameter store: the algorithm name and the parameter
        dictionary come either from the direct arguments or, when absent,
        from the given script file.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        # A script is only consulted for arguments not given directly.
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Return the value stored for the key, or the whole parameter dict if the key is unknown"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1538 # ==============================================================================
1539 class DataObserver(object):
1541 Classe générale d'interface de type observer
1544 name = "GenericObserver",
1556 self.__name = str(name)
1561 if onVariable is None:
1562 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1563 elif type(onVariable) in (tuple, list):
1564 self.__V = tuple(map( str, onVariable ))
1565 if withInfo is None:
1568 self.__I = (str(withInfo),)*len(self.__V)
1569 elif isinstance(onVariable, str):
1570 self.__V = (onVariable,)
1571 if withInfo is None:
1572 self.__I = (onVariable,)
1574 self.__I = (str(withInfo),)
1576 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1578 if asObsObject is not None:
1579 self.__O = asObsObject
1581 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1582 __Function = Observer2Func(__FunctionText)
1583 self.__O = __Function.getfunc()
1585 for k in range(len(self.__V)):
1588 if ename not in withAlgo:
1589 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1591 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1594 "x.__repr__() <==> repr(x)"
1595 return repr(self.__V)+"\n"+repr(self.__O)
1598 "x.__str__() <==> str(x)"
1599 return str(self.__V)+"\n"+str(self.__O)
1601 # ==============================================================================
class UserScript(object):
    """
    General interface class holding a user-supplied script text.
    """
    def __init__(self,
                 name       = "GenericUserScript",
                 asTemplate = None,
                 asString   = None,
                 asScript   = None,
                ):
        """
        Store the script text, taken in priority order from the direct
        string, a named template (selected by the object name), or an
        external script file.
        """
        self.__name = str(name)
        #
        if asString is not None:
            self.__F = str(asString)
        elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
            self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
        elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            self.__F = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            self.__F = Interfaces.ImportFromScript(asScript).getstring()
        else:
            # No source at all: keep an empty script text.
            self.__F = ""

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__F)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__F)
1635 # ==============================================================================
class ExternalParameters(object):
    """
    General interface class holding a named dictionary of external
    parameters, with dict-like access (get/keys/pop/items/in).
    """
    def __init__(self,
                 name     = "GenericExternalParameters",
                 asDict   = None,
                 asScript = None,
                ):
        "Initialize the store and load the initial parameters"
        self.__name = str(name)
        self.__P    = {}
        #
        self.updateParameters( asDict, asScript )

    def updateParameters(self,
                 asDict   = None,
                 asScript = None,
                ):
        "Update the parameters from a dict, or from a script when no dict is given"
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def get(self, key = None):
        "Return the value for the key, or the list of keys when the key is absent"
        if key in self.__P:
            return self.__P[key]
        else:
            return list(self.__P.keys())

    def keys(self):
        "Return the list of parameter names"
        return list(self.__P.keys())

    def pop(self, k, d):
        "Remove the key and return its value, or the default d when absent"
        return self.__P.pop(k, d)

    def items(self):
        "Return the (key, value) pairs"
        return self.__P.items()

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__P
1684 # ==============================================================================
1685 class State(object):
1687 Classe générale d'interface de type état
1690 name = "GenericVector",
1692 asPersistentVector = None,
1698 toBeChecked = False,
1701 Permet de définir un vecteur :
1702 - asVector : entrée des données, comme un vecteur compatible avec le
1703 constructeur de numpy.matrix, ou "True" si entrée par script.
1704 - asPersistentVector : entrée des données, comme une série de vecteurs
1705 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1706 type Persistence, ou "True" si entrée par script.
1707 - asScript : si un script valide est donné contenant une variable
1708 nommée "name", la variable est de type "asVector" (par défaut) ou
1709 "asPersistentVector" selon que l'une de ces variables est placée à
1711 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1712 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1713 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1714 nommée "name"), on récupère les colonnes et on les range ligne après
1715 ligne (colMajor=False, par défaut) ou colonne après colonne
1716 (colMajor=True). La variable résultante est de type "asVector" (par
1717 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1720 self.__name = str(name)
1721 self.__check = bool(toBeChecked)
1725 self.__is_vector = False
1726 self.__is_series = False
1728 if asScript is not None:
1729 __Vector, __Series = None, None
1730 if asPersistentVector:
1731 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1733 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1734 elif asDataFile is not None:
1735 __Vector, __Series = None, None
1736 if asPersistentVector:
1737 if colNames is not None:
1738 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1740 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1741 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1742 __Series = numpy.transpose(__Series)
1743 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1744 __Series = numpy.transpose(__Series)
1746 if colNames is not None:
1747 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1749 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1751 __Vector = numpy.ravel(__Vector, order = "F")
1753 __Vector = numpy.ravel(__Vector, order = "C")
1755 __Vector, __Series = asVector, asPersistentVector
1757 if __Vector is not None:
1758 self.__is_vector = True
1759 if isinstance(__Vector, str):
1760 __Vector = PlatformInfo.strvect2liststr( __Vector )
1761 self.__V = numpy.ravel(numpy.asarray( __Vector, dtype=float )).reshape((-1,1))
1762 self.shape = self.__V.shape
1763 self.size = self.__V.size
1764 elif __Series is not None:
1765 self.__is_series = True
1766 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1767 self.__V = Persistence.OneVector(self.__name)
1768 if isinstance(__Series, str):
1769 __Series = PlatformInfo.strmatrix2liststr(__Series)
1770 for member in __Series:
1771 if isinstance(member, str):
1772 member = PlatformInfo.strvect2liststr( member )
1773 self.__V.store(numpy.asarray( member, dtype=float ))
1776 if isinstance(self.__V.shape, (tuple, list)):
1777 self.shape = self.__V.shape
1779 self.shape = self.__V.shape()
1780 if len(self.shape) == 1:
1781 self.shape = (self.shape[0],1)
1782 self.size = self.shape[0] * self.shape[1]
1784 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1786 if scheduledBy is not None:
1787 self.__T = scheduledBy
1789 def getO(self, withScheduler=False):
1791 return self.__V, self.__T
1792 elif self.__T is None:
1798 "Vérification du type interne"
1799 return self.__is_vector
1802 "Vérification du type interne"
1803 return self.__is_series
1806 "x.__repr__() <==> repr(x)"
1807 return repr(self.__V)
1810 "x.__str__() <==> str(x)"
1811 return str(self.__V)
1813 # ==============================================================================
1814 class Covariance(object):
1816 Classe générale d'interface de type covariance
1819 name = "GenericCovariance",
1820 asCovariance = None,
1821 asEyeByScalar = None,
1822 asEyeByVector = None,
1825 toBeChecked = False,
1828 Permet de définir une covariance :
1829 - asCovariance : entrée des données, comme une matrice compatible avec
1830 le constructeur de numpy.matrix
1831 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1832 multiplicatif d'une matrice de corrélation identité, aucune matrice
1833 n'étant donc explicitement à donner
1834 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1835 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1836 n'étant donc explicitement à donner
1837 - asCovObject : entrée des données comme un objet python, qui a les
1838 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1839 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1840 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1841 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1842 pleine doit être vérifié
1844 self.__name = str(name)
1845 self.__check = bool(toBeChecked)
1848 self.__is_scalar = False
1849 self.__is_vector = False
1850 self.__is_matrix = False
1851 self.__is_object = False
1853 if asScript is not None:
1854 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1856 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1858 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1860 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1862 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1864 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1866 if __Scalar is not None:
1867 if isinstance(__Scalar, str):
1868 __Scalar = PlatformInfo.strvect2liststr( __Scalar )
1869 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1870 if numpy.array(__Scalar).size != 1:
1871 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1872 self.__is_scalar = True
1873 self.__C = numpy.abs( float(__Scalar) )
1876 elif __Vector is not None:
1877 if isinstance(__Vector, str):
1878 __Vector = PlatformInfo.strvect2liststr( __Vector )
1879 self.__is_vector = True
1880 self.__C = numpy.abs( numpy.ravel(numpy.asarray( __Vector, dtype=float )) )
1881 self.shape = (self.__C.size,self.__C.size)
1882 self.size = self.__C.size**2
1883 elif __Matrix is not None:
1884 self.__is_matrix = True
1885 self.__C = numpy.matrix( __Matrix, float )
1886 self.shape = self.__C.shape
1887 self.size = self.__C.size
1888 elif __Object is not None:
1889 self.__is_object = True
1891 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1892 if not hasattr(self.__C,at):
1893 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1894 if hasattr(self.__C,"shape"):
1895 self.shape = self.__C.shape
1898 if hasattr(self.__C,"size"):
1899 self.size = self.__C.size
1904 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1908 def __validate(self):
1910 if self.__C is None:
1911 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1912 if self.ismatrix() and min(self.shape) != max(self.shape):
1913 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1914 if self.isobject() and min(self.shape) != max(self.shape):
1915 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1916 if self.isscalar() and self.__C <= 0:
1917 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1918 if self.isvector() and (self.__C <= 0).any():
1919 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1920 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1922 numpy.linalg.cholesky( self.__C )
1924 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1925 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1929 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1932 "Vérification du type interne"
1933 return self.__is_scalar
1936 "Vérification du type interne"
1937 return self.__is_vector
1940 "Vérification du type interne"
1941 return self.__is_matrix
1944 "Vérification du type interne"
1945 return self.__is_object
1950 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1951 elif self.isvector():
1952 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1953 elif self.isscalar():
1954 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1955 elif self.isobject() and hasattr(self.__C,"getI"):
1956 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1958 return None # Indispensable
1963 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1964 elif self.isvector():
1965 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1966 elif self.isscalar():
1967 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1968 elif self.isobject() and hasattr(self.__C,"getT"):
1969 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1971 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1974 "Décomposition de Cholesky"
1976 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1977 elif self.isvector():
1978 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1979 elif self.isscalar():
1980 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1981 elif self.isobject() and hasattr(self.__C,"cholesky"):
1982 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1984 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1986 def choleskyI(self):
1987 "Inversion de la décomposition de Cholesky"
1989 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1990 elif self.isvector():
1991 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1992 elif self.isscalar():
1993 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1994 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1995 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1997 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
2000 "Racine carrée matricielle"
2003 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
2004 elif self.isvector():
2005 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
2006 elif self.isscalar():
2007 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
2008 elif self.isobject() and hasattr(self.__C,"sqrtm"):
2009 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
2011 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
2014 "Inversion de la racine carrée matricielle"
2017 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
2018 elif self.isvector():
2019 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
2020 elif self.isscalar():
2021 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
2022 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
2023 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
2025 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
2027 def diag(self, msize=None):
2028 "Diagonale de la matrice"
2030 return numpy.diag(self.__C)
2031 elif self.isvector():
2033 elif self.isscalar():
2035 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2037 return self.__C * numpy.ones(int(msize))
2038 elif self.isobject() and hasattr(self.__C,"diag"):
2039 return self.__C.diag()
2041 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2043 def trace(self, msize=None):
2044 "Trace de la matrice"
2046 return numpy.trace(self.__C)
2047 elif self.isvector():
2048 return float(numpy.sum(self.__C))
2049 elif self.isscalar():
2051 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2053 return self.__C * int(msize)
2054 elif self.isobject():
2055 return self.__C.trace()
2057 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2059 def asfullmatrix(self, msize=None):
2062 return numpy.asarray(self.__C, dtype=float)
2063 elif self.isvector():
2064 return numpy.asarray( numpy.diag(self.__C), dtype=float )
2065 elif self.isscalar():
2067 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2069 return numpy.asarray( self.__C * numpy.eye(int(msize)), dtype=float )
2070 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2071 return self.__C.asfullmatrix()
2073 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2075 def assparsematrix(self):
2083 "x.__repr__() <==> repr(x)"
2084 return repr(self.__C)
2087 "x.__str__() <==> str(x)"
2088 return str(self.__C)
2090 def __add__(self, other):
2091 "x.__add__(y) <==> x+y"
2092 if self.ismatrix() or self.isobject():
2093 return self.__C + numpy.asmatrix(other)
2094 elif self.isvector() or self.isscalar():
2095 _A = numpy.asarray(other)
2096 if len(_A.shape) == 1:
2097 _A.reshape((-1,1))[::2] += self.__C
2099 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2100 return numpy.asmatrix(_A)
2102 def __radd__(self, other):
2103 "x.__radd__(y) <==> y+x"
2104 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2106 def __sub__(self, other):
2107 "x.__sub__(y) <==> x-y"
2108 if self.ismatrix() or self.isobject():
2109 return self.__C - numpy.asmatrix(other)
2110 elif self.isvector() or self.isscalar():
2111 _A = numpy.asarray(other)
2112 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2113 return numpy.asmatrix(_A)
2115 def __rsub__(self, other):
2116 "x.__rsub__(y) <==> y-x"
2117 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2120 "x.__neg__() <==> -x"
2123 def __matmul__(self, other):
2124 "x.__mul__(y) <==> x@y"
2125 if self.ismatrix() and isinstance(other, (int, float)):
2126 return numpy.asarray(self.__C) * other
2127 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2128 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2129 return numpy.ravel(self.__C @ numpy.ravel(other))
2130 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2131 return numpy.asarray(self.__C) @ numpy.asarray(other)
2133 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2134 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2135 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2136 return numpy.ravel(self.__C) * numpy.ravel(other)
2137 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2138 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2140 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2141 elif self.isscalar() and isinstance(other,numpy.matrix):
2142 return numpy.asarray(self.__C * other)
2143 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2144 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2145 return self.__C * numpy.ravel(other)
2147 return self.__C * numpy.asarray(other)
2148 elif self.isobject():
2149 return self.__C.__matmul__(other)
2151 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2153 def __mul__(self, other):
2154 "x.__mul__(y) <==> x*y"
2155 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2156 return self.__C * other
2157 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2158 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2159 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2160 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2161 return self.__C * numpy.asmatrix(other)
2163 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2164 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2165 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2166 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2167 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2168 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2170 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2171 elif self.isscalar() and isinstance(other,numpy.matrix):
2172 return self.__C * other
2173 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2174 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2175 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2177 return self.__C * numpy.asmatrix(other)
2178 elif self.isobject():
2179 return self.__C.__mul__(other)
2181 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2183 def __rmatmul__(self, other):
2184 "x.__rmul__(y) <==> y@x"
2185 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2186 return other * self.__C
2187 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2188 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2189 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2190 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2191 return numpy.asmatrix(other) * self.__C
2193 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2194 elif self.isvector() and isinstance(other,numpy.matrix):
2195 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2196 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2197 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2198 return numpy.asmatrix(numpy.array(other) * self.__C)
2200 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2201 elif self.isscalar() and isinstance(other,numpy.matrix):
2202 return other * self.__C
2203 elif self.isobject():
2204 return self.__C.__rmatmul__(other)
2206 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2208 def __rmul__(self, other):
2209 "x.__rmul__(y) <==> y*x"
2210 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2211 return other * self.__C
2212 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2213 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2214 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2215 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2216 return numpy.asmatrix(other) * self.__C
2218 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2219 elif self.isvector() and isinstance(other,numpy.matrix):
2220 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2221 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2222 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2223 return numpy.asmatrix(numpy.array(other) * self.__C)
2225 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2226 elif self.isscalar() and isinstance(other,numpy.matrix):
2227 return other * self.__C
2228 elif self.isscalar() and isinstance(other,float):
2229 return other * self.__C
2230 elif self.isobject():
2231 return self.__C.__rmul__(other)
2233 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2236 "x.__len__() <==> len(x)"
2237 return self.shape[0]
2239 # ==============================================================================
class Observer2Func(object):
    """
    Build an observer function object from its source text.
    """
    def __init__(self, corps=""):
        # Raw Python source of the observer body; executed on each call.
        self.__corps = corps
    def func(self,var,info):
        "Observer function: runs the stored source with var/info in scope"
        # NOTE(review): exec of user-provided text — only safe with trusted input.
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by this object"
        return self.func
2253 # ==============================================================================
class CaseLogger(object):
    """
    Record of the commands used to build a case, with normalized dump and
    load in several exchange formats.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []
        self.__switchoff = False
        # Built-in output formatters, extensible through __addViewers.
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
            "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
            "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
            }
        # Built-in input parsers, extensible through __addLoaders.
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # The implicit "self" argument is never part of a logged command.
            if "self" in __keys:
                __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                # Suspend further recording until re-enabled.
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized load of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
2306 # ==============================================================================
2309 _extraArguments = None,
2310 _sFunction = lambda x: x,
2315 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2316 correspondante de valeurs de la fonction en argument
2318 # Vérifications et définitions initiales
2319 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2320 if not PlatformInfo.isIterable( __xserie ):
2321 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2323 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2326 __mpWorkers = int(_mpWorkers)
2328 import multiprocessing
2339 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2340 with multiprocessing.Pool(__mpWorkers) as pool:
2341 __multiHX = pool.map( _sFunction, _jobs )
2344 # logging.debug("MULTF Internal multiprocessing calculation end")
2346 # logging.debug("MULTF Internal monoprocessing calculation begin")
2348 if _extraArguments is None:
2349 for __xvalue in __xserie:
2350 __multiHX.append( _sFunction( __xvalue ) )
2351 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2352 for __xvalue in __xserie:
2353 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2354 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2355 for __xvalue in __xserie:
2356 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2358 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2359 # logging.debug("MULTF Internal monoprocessing calculation end")
2361 # logging.debug("MULTF Internal multifonction calculations end")
2364 # ==============================================================================
2365 if __name__ == "__main__":
2366 print('\n AUTODIAGNOSTIC\n')