1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
40 class CacheManager(object):
# NOTE(review): this chunk is elided — many original lines are missing, so
# the definitions below are fragmentary; comments describe only visible code.
42 General-purpose class managing a cache of computations
# Fragment of the constructor signature: the redundancy tolerance and the
# cache length can be customised at creation time.
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 The tolerance characteristics can be modified at creation.
# Relative tolerance (float) and cache length (int); the initial length is
# kept so it can be restored when the cache is re-enabled (see the enable
# fragment at the end of the class).
# NOTE(review): "lenght" is a misspelling of "length" kept throughout for
# API compatibility.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Checks the existence of a computation matching the given value"
# Scan the cache backwards (most recent first), bounded by the cache length.
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# Reject candidates without a 'size' attribute, with a different operator
# name, or with a different vector size (the branch bodies are elided).
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component rejection test before paying for the full norm below.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
# Cache hit: distance to the cached point is below tolerance * cached norm.
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stores, for an operator o, a computation Hx matching the value x"
# First storage: size the cache from the state dimension (capped at 50).
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Extend the list if new
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# FIFO eviction of the oldest entries once the cache exceeds its length.
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
# Each entry is a 4-tuple: (point, value, norm of point, operator name).
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# (elided) disable/enable fragments: disabling records the current length
# and clears the enabled flag; enabling restores the initial length.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
114 class Operator(object):
# NOTE(review): elided chunk — several original lines are missing; the
# comments below describe only the visible code.
116 General interface class for a simple operator type
# Fragment of the constructor signature (some parameters elided).
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 reducingMemoryUse = False,
129 inputAsMultiFunction = False,
130 enableMultiProcess = False,
131 extraArguments = None,
134 An object of this type is built by providing, through one of the
135 two keywords, either a python function or multi-function, or a
138 - name : operator name
139 - fromMethod : argument of Python function type
140 - fromMatrix : argument suitable for the numpy.matrix constructor
141 - avoidingRedundancy : boolean avoiding (or not) redundant computations
142 - reducingMemoryUse : boolean forcing (or not) computations using less
144 - inputAsMultiFunction : boolean indicating a function explicitly
145 defined (or not) as a multi-function
146 - extraArguments : extra arguments passed to the base function
147 and its derivatives (tuple or dictionary)
# Per-instance call counters, mirrored by the class-level global counters
# reported in nbcalls() below.
149 self.__name = str(name)
150 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
151 self.__reduceM = bool( reducingMemoryUse )
152 self.__avoidRC = bool( avoidingRedundancy )
153 self.__inputAsMF = bool( inputAsMultiFunction )
154 self.__mpEnabled = bool( enableMultiProcess )
155 self.__extraArgs = extraArguments
# Internal representation: a user multi-function is used as-is; a mono
# function is wrapped into a multi-function via functools.partial; a
# matrix yields a purely linear operator.
156 if fromMethod is not None and self.__inputAsMF:
157 self.__Method = fromMethod # logtimer(fromMethod)
159 self.__Type = "Method"
160 elif fromMethod is not None and not self.__inputAsMF:
161 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
163 self.__Type = "Method"
164 elif fromMatrix is not None:
# NOTE(review): numpy.matrix is deprecated and numpy.float was removed in
# NumPy >= 1.24 — confirm the supported NumPy version range.
166 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
167 self.__Type = "Matrix"
# Cache control: both fragments delegate to the class-wide CacheManager CM.
173 def disableAvoidingRedundancy(self):
175 Operator.CM.disable()
177 def enableAvoidingRedundancy(self):
# NOTE(review): this visible line disables the cache inside the "enable"
# method — presumably the else-branch of an elided condition; confirm
# against the full source.
182 Operator.CM.disable()
188 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
190 Returns the result of applying the operator to a
191 series of xValue arguments. This method simply applies it, each
192 argument being a priori expected to be of the right type.
194 - the serial arguments are:
195 - xValue : argument suitable for applying the operator
196 - HValue : precomputed value of the operator at this point
197 - argsAsSerie : indicates whether the arguments are mono or multi-valued
# Precomputed values: store them (as column matrices) in the cache instead
# of evaluating the operator.
204 if HValue is not None:
208 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
210 if _HValue is not None:
211 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
213 for i in range(len(_HValue)):
214 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
216 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
# Otherwise evaluate each point: first try the cache, then the matrix
# form, and collect the remaining points for one batched call.
221 for i, xv in enumerate(_xValue):
223 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
225 __alreadyCalculated = False
227 if __alreadyCalculated:
228 self.__addOneCacheCall()
231 if self.__Matrix is not None:
232 self.__addOneMatrixCall()
233 _xv = numpy.ravel(xv).reshape((-1,1))
234 _hv = self.__Matrix * _xv
236 self.__addOneMethodCall()
240 _HxValue.append( _hv )
# Batched evaluation of all non-cached, non-matrix points in one call.
242 if len(_xserie)>0 and self.__Matrix is None:
243 if self.__extraArgs is None:
244 _hserie = self.__Method( _xserie ) # MF computation
246 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
247 if not hasattr(_hserie, "pop"):
248 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
254 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally pack the results column-wise into a single 2D array.
256 if returnSerieAsArrayMatrix:
257 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
259 if argsAsSerie: return _HxValue
260 else: return _HxValue[-1]
262 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
264 Returns the result of applying the operator to
265 (xValue, uValue) pairs. This method simply applies it, its
266 argument being a priori expected to be of the right type. If uValue is
267 None, the operator is assumed to apply to xValue only.
269 - paires : the paired arguments are:
270 - xValue : X argument suitable for applying the operator
271 - uValue : U argument suitable for applying the operator
272 - argsAsSerie : indicates whether the argument is mono or multi-valued
274 if argsAsSerie: _xuValue = paires
275 else: _xuValue = (paires,)
276 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix form: the control U is ignored, only X is multiplied.
278 if self.__Matrix is not None:
280 for paire in _xuValue:
281 _xValue, _uValue = paire
282 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
283 self.__addOneMatrixCall()
284 _HxValue.append( self.__Matrix * _xValue )
# Function form: pass (X,U) pairs when U is set, bare X otherwise.
287 for paire in _xuValue:
288 _xValue, _uValue = paire
289 if _uValue is not None:
290 _xuArgs.append( paire )
292 _xuArgs.append( _xValue )
293 self.__addOneMethodCall( len(_xuArgs) )
294 if self.__extraArgs is None:
295 _HxValue = self.__Method( _xuArgs ) # MF computation
297 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
299 if returnSerieAsArrayMatrix:
300 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
302 if argsAsSerie: return _HxValue
303 else: return _HxValue[-1]
305 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
307 Returns the result of applying the operator to a
308 series of xValue arguments, knowing that the operator is valid at
309 xNominal. This method simply applies it, its argument being a
310 priori expected to be of the right type. If the operator is linear,
311 being a matrix, then it is valid at any nominal point and xNominal
312 can be anything. There is a single pair by default, and argsAsSerie
313 indicates that the argument is multi-pair.
315 - paires : the paired arguments are:
316 - xNominal : series of arguments giving the point at which the
317 operator is built before being applied
318 - xValue : series of arguments suitable for applying the operator
319 - argsAsSerie : indicates whether the argument is mono or multi-valued
321 if argsAsSerie: _nxValue = paires
322 else: _nxValue = (paires,)
323 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
# Linear case: the nominal point is irrelevant, apply the matrix.
325 if self.__Matrix is not None:
327 for paire in _nxValue:
328 _xNominal, _xValue = paire
329 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
330 self.__addOneMatrixCall()
331 _HxValue.append( self.__Matrix * _xValue )
# Function form: delegate the whole (nominal, value) series in one call.
333 self.__addOneMethodCall( len(_nxValue) )
334 if self.__extraArgs is None:
335 _HxValue = self.__Method( _nxValue ) # MF computation
337 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
339 if returnSerieAsArrayMatrix:
340 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
342 if argsAsSerie: return _HxValue
343 else: return _HxValue[-1]
345 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
347 Returns the operator in matrix form
349 if self.__Matrix is not None:
350 self.__addOneMatrixCall()
351 mValue = [self.__Matrix,]
# The sentinel string marks "no operating point given"; any other value is
# used as the point(s) where the function form is evaluated as a matrix.
352 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Do not use "None"
355 self.__addOneMethodCall( len(ValueForMethodForm) )
356 for _vfmf in ValueForMethodForm:
357 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
359 self.__addOneMethodCall()
360 mValue = self.__Method(((ValueForMethodForm, None),))
362 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
364 if argsAsSerie: return mValue
365 else: return mValue[-1]
# (elided def line) shape accessor:
369 Returns the size in numpy form if the operator is available in
370 matrix form
372 if self.__Matrix is not None:
373 return self.__Matrix.shape
375 raise ValueError("Matrix form of the operator is not available, nor the shape")
377 def nbcalls(self, which=None):
379 Returns the numbers of evaluations of the operator
# Tuple fragment: per-instance counters followed by the class-level global
# ones; `which` selects one entry, None returns the whole tuple.
382 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
383 self.__NbCallsAsMatrix,
384 self.__NbCallsAsMethod,
385 self.__NbCallsOfCached,
386 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
387 Operator.NbCallsAsMatrix,
388 Operator.NbCallsAsMethod,
389 Operator.NbCallsOfCached,
391 if which is None: return __nbcalls
392 else: return __nbcalls[which]
394 def __addOneMatrixCall(self):
395 "Counts one call"
396 self.__NbCallsAsMatrix += 1 # Local count
397 Operator.NbCallsAsMatrix += 1 # Global count
399 def __addOneMethodCall(self, nb = 1):
400 "Counts one call"
401 self.__NbCallsAsMethod += nb # Local count
402 Operator.NbCallsAsMethod += nb # Global count
404 def __addOneCacheCall(self):
405 "Counts one call"
406 self.__NbCallsOfCached += 1 # Local count
407 Operator.NbCallsOfCached += 1 # Global count
409 # ==============================================================================
410 class FullOperator(object):
# NOTE(review): elided chunk — several original lines are missing; the
# comments below describe only the visible code.
412 General interface class for a full operator type
413 (Direct, Linear Tangent, Adjoint)
# Fragment of the constructor signature (some parameters elided).
416 name = "GenericFullOperator",
418 asOneFunction = None, # 1 Function
419 asThreeFunctions = None, # 3 Functions in a dictionary
420 asScript = None, # 1 or 3 Function(s) given by script
421 asDict = None, # Parameters
423 extraArguments = None,
424 performancePrf = None,
425 inputAsMF = False,# Function(s) as Multi-Functions
430 self.__name = str(name)
431 self.__check = bool(toBeChecked)
432 self.__extraArgs = extraArguments
# Normalise the parameter dictionary and the multiprocessing switches:
# derivatives and evaluation multiprocessing are mutually exclusive.
437 if (asDict is not None) and isinstance(asDict, dict):
438 __Parameters.update( asDict )
439 # EnableMultiProcessingInDerivatives=True takes priority
440 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
441 __Parameters["EnableMultiProcessingInDerivatives"] = True
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInDerivatives" not in __Parameters:
444 __Parameters["EnableMultiProcessingInDerivatives"] = False
445 if __Parameters["EnableMultiProcessingInDerivatives"]:
446 __Parameters["EnableMultiProcessingInEvaluation"] = False
447 if "EnableMultiProcessingInEvaluation" not in __Parameters:
448 __Parameters["EnableMultiProcessingInEvaluation"] = False
449 if "withIncrement" in __Parameters: # Temporary
450 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
451 # The default is equivalent to "ReducedOverallRequirements"
452 __reduceM, __avoidRC = True, True
453 if performancePrf is not None:
454 if performancePrf == "ReducedAmountOfCalculation":
455 __reduceM, __avoidRC = False, True
456 elif performancePrf == "ReducedMemoryFootprint":
457 __reduceM, __avoidRC = True, False
458 elif performancePrf == "NoSavings":
459 __reduceM, __avoidRC = False, False
# Script input: load either a matrix or the Direct (and possibly
# Tangent/Adjoint) operators from the given script.
461 if asScript is not None:
462 __Matrix, __Function = None, None
464 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
466 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
467 __Function.update({"useApproximatedDerivatives":True})
468 __Function.update(__Parameters)
469 elif asThreeFunctions:
471 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
472 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
473 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
475 __Function.update(__Parameters)
# Direct (non-script) input: one function with approximated derivatives,
# or a dictionary of up to three functions.
478 if asOneFunction is not None:
479 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
480 if asOneFunction["Direct"] is not None:
481 __Function = asOneFunction
483 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
485 __Function = { "Direct":asOneFunction }
486 __Function.update({"useApproximatedDerivatives":True})
487 __Function.update(__Parameters)
488 elif asThreeFunctions is not None:
# Full Tangent/Adjoint pair given and derivatives not approximated.
489 if isinstance(asThreeFunctions, dict) and \
490 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
491 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
492 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
493 __Function = asThreeFunctions
494 elif isinstance(asThreeFunctions, dict) and \
495 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
496 __Function = asThreeFunctions
497 __Function.update({"useApproximatedDerivatives":True})
499 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# The Tangent stands in for a missing Direct operator.
500 if "Direct" not in asThreeFunctions:
501 __Function["Direct"] = asThreeFunctions["Tangent"]
502 __Function.update(__Parameters)
# Commented-out legacy Python 2 guard, kept verbatim:
506 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
507 # for k in ("Direct", "Tangent", "Adjoint"):
508 # if k in __Function and hasattr(__Function[k],"__class__"):
509 # if type(__Function[k]) is type(self.__init__):
510 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalise the "applied in X" precomputed values into a dictionary.
512 if appliedInX is not None and isinstance(appliedInX, dict):
513 __appliedInX = appliedInX
514 elif appliedInX is not None:
515 __appliedInX = {"HXb":appliedInX}
519 if scheduledBy is not None:
520 self.__T = scheduledBy
# Case 1: approximated derivatives via finite differences (FDA), with
# per-option defaults filled in before building the FDA object.
522 if isinstance(__Function, dict) and \
523 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
524 ("Direct" in __Function) and (__Function["Direct"] is not None):
525 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
526 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
527 if "withdX" not in __Function: __Function["withdX"] = None
528 if "withReducingMemoryUse" not in __Function: __Function["withReducingMemoryUse"] = __reduceM
529 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = __avoidRC
530 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
531 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
532 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
533 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Deferred import avoids a circular dependency at module load time —
# TODO confirm against the full source.
534 from daCore import NumericObjects
535 FDA = NumericObjects.FDApproximation(
537 Function = __Function["Direct"],
538 centeredDF = __Function["CenteredFiniteDifference"],
539 increment = __Function["DifferentialIncrement"],
540 dX = __Function["withdX"],
541 extraArguments = self.__extraArgs,
542 reducingMemoryUse = __Function["withReducingMemoryUse"],
543 avoidingRedundancy = __Function["withAvoidingRedundancy"],
544 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
545 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
546 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
547 mpWorkers = __Function["NumberOfProcesses"],
548 mfEnabled = __Function["withmfEnabled"],
550 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
551 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
552 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: three explicit functions (Direct, Tangent, Adjoint).
553 elif isinstance(__Function, dict) and \
554 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
555 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
556 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
557 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
558 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: explicit matrix; the adjoint is the transposed matrix.
# NOTE(review): numpy.matrix / numpy.float usage is deprecated/removed in
# recent NumPy releases — confirm the supported version range.
559 elif asMatrix is not None:
560 __matrice = numpy.matrix( __Matrix, numpy.float )
561 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
562 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
563 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, reducingMemoryUse = __reduceM, avoidingRedundancy = __avoidRC, inputAsMultiFunction = inputAsMF )
# No valid definition: fail with an explicit diagnostic.
566 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Store the precomputed "applied in X" values as column matrices.
568 if __appliedInX is not None:
569 self.__FO["AppliedInX"] = {}
570 for key in list(__appliedInX.keys()):
571 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
572 # Case of a true matrix
573 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
574 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
575 # Case of a vector represented as a 2-dimensional array
576 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
578 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
580 self.__FO["AppliedInX"] = None
# (elided def lines) standard repr/str delegating to the operator dict.
586 "x.__repr__() <==> repr(x)"
587 return repr(self.__FO)
590 "x.__str__() <==> str(x)"
591 return str(self.__FO)
593 # ==============================================================================
594 class Algorithm(object):
596 Classe générale d'interface de type algorithme
598 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
599 d'assimilation, en fournissant un container (dictionnaire) de variables
600 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
602 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
604 def __init__(self, name):
606 L'initialisation présente permet de fabriquer des variables de stockage
607 disponibles de manière générique dans les algorithmes élémentaires. Ces
608 variables de stockage sont ensuite conservées dans un dictionnaire
609 interne à l'objet, mais auquel on accède par la méthode "get".
611 Les variables prévues sont :
612 - APosterioriCorrelations : matrice de corrélations de la matrice A
613 - APosterioriCovariance : matrice de covariances a posteriori : A
614 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
615 - APosterioriVariances : vecteur des variances de la matrice A
616 - Analysis : vecteur d'analyse : Xa
617 - BMA : Background moins Analysis : Xa - Xb
618 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
619 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
620 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
621 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
622 - CostFunctionJo : partie observations de la fonction-coût : Jo
623 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
624 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
625 - CurrentOptimum : état optimal courant lors d'itérations
626 - CurrentState : état courant lors d'itérations
627 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
628 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
629 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
630 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
631 - Innovation : l'innovation : d = Y - H(X)
632 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
633 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
634 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
635 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
636 - KalmanGainAtOptimum : gain de Kalman à l'optimum
637 - MahalanobisConsistency : indicateur de consistance des covariances
638 - OMA : Observation moins Analyse : Y - Xa
639 - OMB : Observation moins Background : Y - Xb
640 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
641 - ForecastState : état prédit courant lors d'itérations
642 - Residu : dans le cas des algorithmes de vérification
643 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
644 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
645 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
646 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
647 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
648 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
649 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
650 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
651 On peut rajouter des variables à stocker dans l'initialisation de
652 l'algorithme élémentaire qui va hériter de cette classe
654 logging.debug("%s Initialisation", str(name))
655 self._m = PlatformInfo.SystemUsage()
657 self._name = str( name )
658 self._parameters = {"StoreSupplementaryCalculations":[]}
659 self.__internal_state = {}
660 self.__required_parameters = {}
661 self.__required_inputs = {
662 "RequiredInputValues":{"mandatory":(), "optional":()},
663 "ClassificationTags":[],
665 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
666 self.__canonical_parameter_name = {} # Correspondance "lower"->"correct"
667 self.__canonical_stored_name = {} # Correspondance "lower"->"correct"
669 self.StoredVariables = {}
670 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
671 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
672 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
673 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
674 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
675 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
676 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
677 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
678 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
679 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
680 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
681 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
682 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
683 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
684 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
685 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
686 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
687 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
688 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
689 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
690 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
691 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
692 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
693 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
694 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
695 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
696 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
697 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
698 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
699 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
700 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
701 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
702 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
703 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
704 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
705 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
706 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
707 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
708 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
709 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
710 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
711 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
713 for k in self.StoredVariables:
714 self.__canonical_stored_name[k.lower()] = k
716 for k, v in self.__variable_names_not_public.items():
717 self.__canonical_parameter_name[k.lower()] = k
718 self.__canonical_parameter_name["algorithm"] = "Algorithm"
719 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# _pre_run: shared pre-processing before an algorithm run — logs the start,
# resets the cpu/elapsed timers, merges the user Parameters with declared
# defaults, then validates vectors (Xb/Y/U), covariances (B/R/Q) and
# operators (HO/EM/CM) against the declared required inputs, and finally
# normalizes bounds and the forced initialization point.
# NOTE(review): this listing is elided (gaps in the embedded line numbers,
# e.g. missing 'else:'/'try:' lines) — check the full file before editing.
721     def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
723         logging.debug("%s Lancement", self._name)
724         logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
725         self._getTimeState(reset=True)
727         # Update the internal parameters with the content of Parameters,
728         # taking the default values for all those left undefined
729         self.__setParameters(Parameters, reset=True)
730         for k, v in self.__variable_names_not_public.items():
731             if k not in self._parameters:  self.__setParameters( {k:v} )
733         # Corrections and completions of the vectors
# Local checker: raises only when a mandatory vector is missing; otherwise
# just logs at debug level.
734         def __test_vvalue(argument, variable, argname, symbol=None):
735             if symbol is None: symbol = variable
737                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
738                     raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
739                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
740                     logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
742                     logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
744                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
745                     logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
746                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
747                     logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
749                     logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
751         __test_vvalue( Xb, "Xb", "Background or initial state" )
752         __test_vvalue( Y,  "Y",  "Observation" )
753         __test_vvalue( U,  "U",  "Control" )
755         # Corrections and completions of the covariances
# Same checking pattern for error covariance matrices.
756         def __test_cvalue(argument, variable, argname, symbol=None):
757             if symbol is None: symbol = variable
759                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
760                     raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
761                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
762                     logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
764                     logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
766                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
767                     logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
768                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
769                     logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
771                     logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
773         __test_cvalue( B, "B", "Background" )
774         __test_cvalue( R, "R", "Observation" )
775         __test_cvalue( Q, "Q", "Evolution" )
777         # Corrections and completions of the operators
# Same checking pattern for operators; an empty dict counts as "not set".
778         def __test_ovalue(argument, variable, argname, symbol=None):
779             if symbol is None: symbol = variable
780             if argument is None or (isinstance(argument,dict) and len(argument)==0):
781                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
782                     raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
783                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
784                     logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
786                     logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
788                 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
789                     logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
790                 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
791                     logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
793                     logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
795         __test_ovalue( HO, "HO", "Observation", "H" )
796         __test_ovalue( EM, "EM", "Evolution", "M" )
797         __test_ovalue( CM, "CM", "Control Model", "C" )
799         # Corrections and completions of the bounds
800         if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
801             logging.debug("%s Bounds taken into account"%(self._name,))
# Normalize: an empty/invalid Bounds specification becomes None.
803             self._parameters["Bounds"] = None
804         if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
805             logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
806             # Caution: unlike Bounds, no default to None, otherwise one cannot be unbounded
808         # Corrections and completions of the initialization in X
809         if  "InitializationPoint" in self._parameters:
811                 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
812                     if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
813                         raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
814                             %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
815                     # Obtained by typecast: numpy.ravel(self._parameters["InitializationPoint"])
# Fall back to the (flattened) background when no explicit point is forced.
817                     self._parameters["InitializationPoint"] = numpy.ravel(Xb)
819                 if self._parameters["InitializationPoint"] is None:
820                     raise ValueError("Forced initial point can not be set without any given Background or required value")
822         # Workaround for a TNC bug on the return of the Minimum
823         if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
824             self.setParameterValue("StoreInternalVariables",True)
826         # Verbosity and logging
# Below WARNING level the optimizers are made verbose, otherwise silent.
# NOTE(review): scipy.optimize.tnc attribute access is private in recent
# SciPy releases — confirm the supported SciPy version range.
827         if logging.getLogger().level < logging.WARNING:
828             self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
829             if PlatformInfo.has_scipy:
830                 import scipy.optimize
831                 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
833                 self._parameters["optmessages"] = 15
835             self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
836             if PlatformInfo.has_scipy:
837                 import scipy.optimize
838                 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
840                 self._parameters["optmessages"] = 15
# _post_run: shared post-processing after a run — when requested through
# StoreSupplementaryCalculations, derives variances, standard deviations and
# correlations from each stored APosterioriCovariance matrix, then logs
# operator call counts, cache hits, memory usage and cpu/elapsed timing.
844     def _post_run(self,_oH=None):
846         if ("StoreSupplementaryCalculations" in self._parameters) and \
847             "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
848             for _A in self.StoredVariables["APosterioriCovariance"]:
849                 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
850                     self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
851                 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
852                     self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
853                 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation = D^-1/2 * A * D^-1/2 with D the diagonal of A.
854                     _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
855                     _C = numpy.dot(_EI, numpy.dot(_A, _EI))
856                     self.StoredVariables["APosterioriCorrelations"].store( _C )
857         if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
858             logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
859             logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
860         logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
861         logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
862         logging.debug("%s Terminé", self._name)
# _toStore: convenience predicate used by algorithms to decide whether a
# supplementary quantity has to be stored for the current run.
865     def _toStore(self, key):
866         "True if in StoreSupplementaryCalculations, else False"
867         return key in self._parameters["StoreSupplementaryCalculations"]
# get: accessor for stored results, with case-insensitive key resolution via
# __canonical_stored_name; without a key, returns the whole container.
869     def get(self, key=None):
871         Return the stored variable identified by the key, or the
872         dictionary of all available variables when no key is given.
873         The variables are returned directly as objects, so the
874         individual access methods are those of the
875         persistence classes.
# Lines elided in this listing (docstring delimiters, 'if key is not None').
878             return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
880             return self.StoredVariables
# __contains__: case-insensitive membership test on the stored variables.
882     def __contains__(self, key=None):
883         "D.__contains__(k) -> True if D has a key k, else False"
884         if key is None or key.lower() not in self.__canonical_stored_name:
# 'return False' / else branch elided in this listing.
887             return self.__canonical_stored_name[key.lower()] in self.StoredVariables
# keys: dict-like key listing; 'def keys' header line is elided in this
# listing, as is the fallback branch when StoredVariables is absent.
890         "D.keys() -> list of D's keys"
891         if hasattr(self, "StoredVariables"):
892             return self.StoredVariables.keys()
# pop: dict-like removal of a stored variable by (case-insensitive) key;
# 'def pop' header and intermediate branches are elided in this listing.
897         "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
898         if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
899             return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
904                 raise TypeError("pop expected at least 1 arguments, got 0")
905             "If key is not found, d is returned if given, otherwise KeyError is raised"
# run: abstract entry point that each concrete algorithm must override.
911     def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
913         Must implement the elementary assimilation calculation operation in
914         its most natural possible mathematical form.
916         raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# defineRequiredParameter: registers a named algorithm parameter with its
# default, typecast, bounds and allowed-value lists, and records its
# canonical (case-insensitive) name.
918     def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
920         Allows defining required parameters in the algorithm and their
921         default characteristics.
924             raise ValueError("A name is mandatory to define a required parameter.")
# The full dict literal is partially elided in this listing.
926         self.__required_parameters[name] = {
928             "typecast" : typecast,
935         self.__canonical_parameter_name[name.lower()] = name
936         logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# getRequiredParameters: sorted parameter names by default, or the full
# definition dictionary when noDetails is False (branch line elided here).
938     def getRequiredParameters(self, noDetails=True):
940         Returns the list of required parameter names or directly the
941         dictionary of required parameters.
944             return sorted(self.__required_parameters.keys())
946             return self.__required_parameters
# setParameterValue: resolves the canonical parameter name, applies default
# and typecast, then enforces minval/maxval and the allowed-value lists
# (listval for documented values, listadv for advanced/undocumented ones).
# Raises ValueError on any violation; returns the validated value
# (return line elided in this listing).
948     def setParameterValue(self, name=None, value=None):
950         Returns the value of a required parameter in a controlled way
952         __k = self.__canonical_parameter_name[name.lower()]
953         default  = self.__required_parameters[__k]["default"]
954         typecast = self.__required_parameters[__k]["typecast"]
955         minval   = self.__required_parameters[__k]["minval"]
956         maxval   = self.__required_parameters[__k]["maxval"]
957         listval  = self.__required_parameters[__k]["listval"]
958         listadv  = self.__required_parameters[__k]["listadv"]
960         if value is None and default is None:
962         elif value is None and default is not None:
963             if typecast is None: __val = default
964             else:                __val = typecast( default )
966             if typecast is None: __val = value
# try/except wrapper lines around the typecast are elided in this listing.
969                 __val = typecast( value )
971                 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
973         if minval is not None and (numpy.array(__val, float) < minval).any():
974             raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
975         if maxval is not None and (numpy.array(__val, float) > maxval).any():
976             raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
977         if listval is not None or listadv is not None:
978             if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
980                     if listval is not None and v in listval: continue
981                     elif listadv is not None and v in listadv: continue
983                         raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
984             elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
985                 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
# requireInputArguments: declares which run() inputs are mandatory/optional;
# consumed by the __test_*value checkers in _pre_run.
989     def requireInputArguments(self, mandatory=(), optional=()):
991         Allows imposing required calculation arguments as input.
993         self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
994         self.__required_inputs["RequiredInputValues"]["optional"]  = tuple( optional )
# getInputArguments: returns the (mandatory, optional) tuples declared via
# requireInputArguments.
996     def getInputArguments(self):
998         Returns the lists of required input calculation arguments.
1000        return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
# setAttributes: appends classification tags and returns the cumulated list
# (so calling with no argument acts as a getter).
1002    def setAttributes(self, tags=()):
1004        Allows attaching attributes such as classification tags.
1005        Returns the current list in all cases.
1007        self.__required_inputs["ClassificationTags"].extend( tags )
1008        return self.__required_inputs["ClassificationTags"]
# __setParameters: merges fromDico into self._parameters, resolving keys
# case-insensitively against the canonical parameter names, validating each
# required parameter through setParameterValue, and logging the result
# (long values are logged by length only).
1010    def __setParameters(self, fromDico={}, reset=False):
1012        Stores the received parameters in the internal dictionary.
1014        self._parameters.update( fromDico )
1015        __inverse_fromDico_keys = {}
1016        for k in fromDico.keys():
1017            if k.lower() in self.__canonical_parameter_name:
1018                __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1019        #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1020        __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1021        for k in self.__required_parameters.keys():
1022            if k in __canonic_fromDico_keys:
1023                self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
# else branch (elided): fall back to the declared default value.
1025                self._parameters[k] = self.setParameterValue(k)
1028            if hasattr(self._parameters[k],"__len__") and len(self._parameters[k]) > 100:
1029                logging.debug("%s %s de longueur %s", self._name, self.__required_parameters[k]["message"], len(self._parameters[k]))
1031                logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# _setInternalState: stores named variables making up the internal state;
# accepts a single (key, value) pair and/or a whole dictionary, with an
# optional reset of the previous state.
1033    def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1035        Stores named variables constituting the internal state
1037        if reset: # Empty the dictionary beforehand
1038            self.__internal_state = {}
1039        if key is not None and value is not None:
1040            self.__internal_state[key] = value
1041        self.__internal_state.update( dict(fromDico) )
# _getInternalState: returns one internal-state entry by key, or the whole
# state dictionary when the key is absent or unknown.
1043    def _getInternalState(self, key=None):
1045        Returns an internal state in the form of a dictionary of named variables
1047        if key is not None and key in self.__internal_state:
1048            return self.__internal_state[key]
1050            return self.__internal_state
# _getTimeState: with reset=True records the reference cpu/elapsed instants
# (time.process_time / time.perf_counter); otherwise returns the deltas
# since the last reset as a (cpu_time, elapsed_time) tuple in seconds.
1052    def _getTimeState(self, reset=False):
1054        Initializes or returns the computation time (cpu/elapsed) in seconds
1057            self.__initial_cpu_time      = time.process_time()
1058            self.__initial_elapsed_time  = time.perf_counter()
1061            self.__cpu_time     = time.process_time() - self.__initial_cpu_time
1062            self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1063            return self.__cpu_time, self.__elapsed_time
# _StopOnTimeLimit: stop criterion comparing current cpu/elapsed time to the
# MaximumCpuTime / MaximumElapsedTime parameters; returns the boolean, or
# (boolean, reason string) when withReason is True (return line elided).
1065    def _StopOnTimeLimit(self, X=None, withReason=False):
1066        "Stop criteria on time limit: True/False [+ Reason]"
1067        c, e = self._getTimeState()
1068        if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1069            __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1070        elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1071            __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1073            __SC, __SR = False, ""
1079 # ==============================================================================
# AlgorithmAndParameters: facade tying together an algorithm choice, its
# parameter dictionary, and the calculation inputs (Xb, Y, U, HO, EM, CM,
# B, R, Q); supports Python-scheme and YACS-scheme execution, observers,
# and shape/consistency validation of all inputs.
# NOTE(review): this listing is elided (gaps in the embedded numbering) —
# several def headers, else branches and try/except lines are missing.
1080 class AlgorithmAndParameters(object):
1082    General action interface class for the algorithm and its parameters
1085                 name               = "GenericAlgorithm",
1092        self.__name       = str(name)
1096        self.__algorithm         = {}
1097        self.__algorithmFile     = None
1098        self.__algorithmName     = None
1100        self.updateParameters( asDict, asScript )
1102        if asAlgorithm is None and asScript is not None:
1103            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1105            __Algo = asAlgorithm
1107        if __Algo is not None:
1108            self.__A = str(__Algo)
1109            self.__P.update( {"Algorithm":self.__A} )
1111        self.__setAlgorithm( self.__A )
1113        self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
# updateParameters: merges a parameter dict, read either directly or from a
# user script.
1115    def updateParameters(self,
1119        "Parameter update"
1120        if asDict is None and asScript is not None:
1121            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1125        if __Dict is not None:
1126            self.__P.update( dict(__Dict) )
# executePythonScheme: unwraps the calculation objects from asDictAO (via
# their getO() accessors when present), validates shapes, then runs the
# algorithm in-process.
1128    def executePythonScheme(self, asDictAO = None):
1129        "Runs the assimilation calculation"
1130        Operator.CM.clearCache()
1132            if not isinstance(asDictAO, dict):
1133                raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1134            if   hasattr(asDictAO["Background"],"getO"):        self.__Xb = asDictAO["Background"].getO()
1135            elif hasattr(asDictAO["CheckingPoint"],"getO"):     self.__Xb = asDictAO["CheckingPoint"].getO()
1136            else:                                               self.__Xb = None
1137            if hasattr(asDictAO["Observation"],"getO"):         self.__Y  = asDictAO["Observation"].getO()
1138            else:                                               self.__Y  = asDictAO["Observation"]
1139            if hasattr(asDictAO["ControlInput"],"getO"):        self.__U  = asDictAO["ControlInput"].getO()
1140            else:                                               self.__U  = asDictAO["ControlInput"]
1141            if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1142            else:                                               self.__HO = asDictAO["ObservationOperator"]
1143            if hasattr(asDictAO["EvolutionModel"],"getO"):      self.__EM = asDictAO["EvolutionModel"].getO()
1144            else:                                               self.__EM = asDictAO["EvolutionModel"]
1145            if hasattr(asDictAO["ControlModel"],"getO"):        self.__CM = asDictAO["ControlModel"].getO()
1146            else:                                               self.__CM = asDictAO["ControlModel"]
1147            self.__B = asDictAO["BackgroundError"]
1148            self.__R = asDictAO["ObservationError"]
1149            self.__Q = asDictAO["EvolutionError"]
1151        self.__shape_validate()
1153        self.__algorithm.run(
1163            Parameters      = self.__P,
# executeYACSScheme: loads a YACS XML schema file and executes it through
# the SALOME/YACS runtime; requires the SALOME environment to be loaded.
1167    def executeYACSScheme(self, FileName=None):
1168        "Runs the assimilation calculation"
1169        if FileName is None or not os.path.exists(FileName):
1170            raise ValueError("a YACS file name has to be given for YACS execution.\n")
1172            __file    = os.path.abspath(FileName)
1173            logging.debug("The YACS file name is \"%s\"."%__file)
1174        if not PlatformInfo.has_salome or \
1175            not PlatformInfo.has_yacs or \
1176            not PlatformInfo.has_adao:
1177            raise ImportError("\n\n"+\
1178                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1179                "Please load the right environnement before trying to use it.\n")
1182        import SALOMERuntime
1184        SALOMERuntime.RuntimeSALOME_setRuntime()
1186        r = pilot.getRuntime()
1187        xmlLoader = loader.YACSLoader()
1188        xmlLoader.registerProcCataLoader()
1190            catalogAd = r.loadCatalog("proc", __file)
1191            r.addCatalog(catalogAd)
1196            p = xmlLoader.load(__file)
1197        except IOError as ex:
1198            print("The YACS XML schema file can not be loaded: %s"%(ex,))
1200        logger = p.getLogger("parser")
1201        if not logger.isEmpty():
1202            print("The imported YACS XML schema has errors on parsing:")
1203            print(logger.getStr())
1206            print("The YACS XML schema is not valid and will not be executed:")
1207            print(p.getErrorReport())
1209        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1210        p.checkConsistency(info)
1211        if info.areWarningsOrErrors():
1212            print("The YACS XML schema is not coherent and will not be executed:")
1213            print(info.getGlobalRepr())
1215        e = pilot.ExecutorSwig()
1217        if p.getEffectiveState() != pilot.DONE:
1218            print(p.getErrorReport())
# get: looks a key up first among the algorithm's stored variables, then in
# the parameter dict; with no key, returns all public parameters.
1222    def get(self, key = None):
1223        "Checks the existence of a variable or parameter key"
1224        if key in self.__algorithm:
1225            return self.__algorithm.get( key )
1226        elif key in self.__P:
1227            return self.__P[key]
# NOTE(review): 'allvariables = self.__P' aliases (does not copy) __P, so
# the pop() below mutates the parameter dict — confirm against full file.
1229            allvariables = self.__P
1230            for k in self.__variable_names_not_public: allvariables.pop(k, None)
# pop: delegates to the algorithm container (needed for pickling support).
1233    def pop(self, k, d):
1234        "Required for pickling"
1235        return self.__algorithm.pop(k, d)
1237    def getAlgorithmRequiredParameters(self, noDetails=True):
1238        "Returns the list of required parameters according to the algorithm"
1239        return self.__algorithm.getRequiredParameters(noDetails)
1241    def getAlgorithmInputArguments(self):
1242        "Returns the list of required inputs according to the algorithm"
1243        return self.__algorithm.getInputArguments()
1245    def getAlgorithmAttributes(self):
1246        "Returns the list of attributes according to the algorithm"
1247        return self.__algorithm.setAttributes()
# setObserver: attaches an observer callable __O (with info __I, schedule
# __S) to the stored variable named __V; requires an instantiated algorithm.
1249    def setObserver(self, __V, __O, __I, __S):
1250        if self.__algorithm is None \
1251            or isinstance(self.__algorithm, dict) \
1252            or not hasattr(self.__algorithm,"StoredVariables"):
1253            raise ValueError("No observer can be build before choosing an algorithm.")
1254        if __V not in self.__algorithm:
1255            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1257        self.__algorithm.StoredVariables[ __V ].setDataObserver(
1260            HookParameters = __I,
# removeObserver: detaches an observer from variable __V (all of them when
# __A is True).
1263    def removeObserver(self, __V, __O, __A = False):
1264        if self.__algorithm is None \
1265            or isinstance(self.__algorithm, dict) \
1266            or not hasattr(self.__algorithm,"StoredVariables"):
1267            raise ValueError("No observer can be removed before choosing an algorithm.")
1268        if __V not in self.__algorithm:
1269            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1271        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# hasObserver: True when the variable __V currently has a data observer.
1276    def hasObserver(self, __V):
1277        if self.__algorithm is None \
1278            or isinstance(self.__algorithm, dict) \
1279            or not hasattr(self.__algorithm,"StoredVariables"):
1281        if __V not in self.__algorithm:
1283        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# keys (def header elided): merged variable + parameter names, filtered of
# the non-public internal names.
1286        __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1287        for k in self.__variable_names_not_public:
1288            if k in __allvariables: __allvariables.remove(k)
1289        return __allvariables
1291    def __contains__(self, key=None):
1292        "D.__contains__(k) -> True if D has a key k, else False"
1293        return key in self.__algorithm or key in self.__P
# __repr__ / __str__ (def headers elided): textual forms of (A, P).
1296        "x.__repr__() <==> repr(x)"
1297        return repr(self.__A)+", "+repr(self.__P)
1300        "x.__str__() <==> str(x)"
1301        return str(self.__A)+", "+str(self.__P)
# __setAlgorithm: locates the algorithm module on sys.path under the
# daAlgorithms directory, imports it, and instantiates its
# ElementaryAlgorithm class; can only be done once per instance.
1303    def __setAlgorithm(self, choice = None ):
1305        Selects the algorithm to use to carry out the assimilation
1306        study. The argument is a character field referring to the name
1307        of an algorithm performing the operation on the fixed arguments.
1310            raise ValueError("Error: algorithm choice has to be given")
1311        if self.__algorithmName is not None:
1312            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1313        daDirectory = "daAlgorithms"
1315        # Explicitly search for the complete file
1316        # ---------------------------------------
1318        for directory in sys.path:
1319            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1320                module_path = os.path.abspath(os.path.join(directory, daDirectory))
1321        if module_path is None:
1322            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n             The search path is %s"%(choice, sys.path))
1324        # Import the complete file as a module
1325        # ------------------------------------
1327            sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1328            self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1329            if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1330                raise ImportError("this module does not define a valid elementary algorithm.")
1331            self.__algorithmName = str(choice)
1332            sys.path = sys_path_tmp ; del sys_path_tmp
1333        except ImportError as e:
1334            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n             The import error message is: %s"%(choice,e))
1336        # Instantiate an object of the file's elementary type
1337        # ---------------------------------------------------
1338        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# __shape_validate: derives a shape tuple for every input (vector, matrix,
# operator), accepting objects exposing either .size, a .shape attribute or
# a .shape() callable, then cross-checks all pairwise size compatibilities.
1341    def __shape_validate(self):
1343        Validation of the correct correspondence of the sizes of variables
1344        and matrices if there are any.
1346        if self.__Xb is None:                      __Xb_shape = (0,)
1347        elif hasattr(self.__Xb,"size"):            __Xb_shape = (self.__Xb.size,)
1348        elif hasattr(self.__Xb,"shape"):
1349            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1350            else:                                  __Xb_shape = self.__Xb.shape()
1351        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1353        if self.__Y is None:                      __Y_shape = (0,)
1354        elif hasattr(self.__Y,"size"):            __Y_shape = (self.__Y.size,)
1355        elif hasattr(self.__Y,"shape"):
1356            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1357            else:                                 __Y_shape = self.__Y.shape()
1358        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1360        if self.__U is None:                      __U_shape = (0,)
1361        elif hasattr(self.__U,"size"):            __U_shape = (self.__U.size,)
1362        elif hasattr(self.__U,"shape"):
1363            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1364            else:                                 __U_shape = self.__U.shape()
1365        else: raise TypeError("The control (U) has no attribute of shape: problem !")
1367        if self.__B is None:                      __B_shape = (0,0)
1368        elif hasattr(self.__B,"shape"):
1369            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1370            else:                                 __B_shape = self.__B.shape()
1371        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1373        if self.__R is None:                      __R_shape = (0,0)
1374        elif hasattr(self.__R,"shape"):
1375            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1376            else:                                 __R_shape = self.__R.shape()
1377        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1379        if self.__Q is None:                      __Q_shape = (0,0)
1380        elif hasattr(self.__Q,"shape"):
1381            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1382            else:                                 __Q_shape = self.__Q.shape()
1383        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1385        if len(self.__HO) == 0:                              __HO_shape = (0,0)
1386        elif isinstance(self.__HO, dict):                    __HO_shape = (0,0)
1387        elif hasattr(self.__HO["Direct"],"shape"):
1388            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1389            else:                                            __HO_shape = self.__HO["Direct"].shape()
1390        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1392        if len(self.__EM) == 0:                              __EM_shape = (0,0)
1393        elif isinstance(self.__EM, dict):                    __EM_shape = (0,0)
1394        elif hasattr(self.__EM["Direct"],"shape"):
1395            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1396            else:                                            __EM_shape = self.__EM["Direct"].shape()
1397        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1399        if len(self.__CM) == 0:                              __CM_shape = (0,0)
1400        elif isinstance(self.__CM, dict):                    __CM_shape = (0,0)
1401        elif hasattr(self.__CM["Direct"],"shape"):
1402            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1403            else:                                            __CM_shape = self.__CM["Direct"].shape()
1404        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1406        # Verification of the conditions
1407        # ------------------------------
1408        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1409            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1410        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1411            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1413        if not( min(__B_shape) == max(__B_shape) ):
1414            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1415        if not( min(__R_shape) == max(__R_shape) ):
1416            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1417        if not( min(__Q_shape) == max(__Q_shape) ):
1418            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1419        if not( min(__EM_shape) == max(__EM_shape) ):
1420            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1422        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1423            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1424        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1425            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1426        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1427            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1428        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1429            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1431        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1432            if self.__algorithmName in ["EnsembleBlue",]:
# EnsembleBlue special case: Xb is an ensemble, stored member by member.
1433                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1434                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1435                for member in asPersistentVector:
# NOTE(review): numpy.float is removed in NumPy >= 1.24 and numpy.matrix is
# deprecated — this line breaks on recent NumPy; should use float/ndarray.
1436                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1437                __Xb_shape = min(__B_shape)
1439                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1441        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1442            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1444        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1445            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1447        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1448            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1450        if ("Bounds" in self.__P) \
1451            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1452            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1453            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1454                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1456        if ("StateBoundsForQuantiles" in self.__P) \
1457            and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1458            and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1459            raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1460                %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1464 # ==============================================================================
# RegulationAndParameters: lightweight holder of a regulation algorithm name
# and its parameter dictionary, read either directly or from a user script.
# NOTE(review): this listing is elided (def __init__ header and several
# assignment lines are missing) — check the full file before editing.
1465 class RegulationAndParameters(object):
1467    General action interface class for regulation and its parameters
1470                 name               = "GenericRegulation",
1477        self.__name       = str(name)
1480        if asAlgorithm is None and asScript is not None:
1481            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1483            __Algo = asAlgorithm
1485        if asDict is None and asScript is not None:
1486            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1490        if __Dict is not None:
1491            self.__P.update( dict(__Dict) )
1493        if __Algo is not None:
1494            self.__P.update( {"Algorithm":str(__Algo)} )
# get: returns one parameter by key (no-key branch elided in this listing).
1496    def get(self, key = None):
1497        "Checks the existence of a variable or parameter key"
1499            return self.__P[key]
1503 # ==============================================================================
1504 class DataObserver(object):
1506 Classe générale d'interface de type observer
1509 name = "GenericObserver",
1521 self.__name = str(name)
1526 if onVariable is None:
1527 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1528 elif type(onVariable) in (tuple, list):
1529 self.__V = tuple(map( str, onVariable ))
1530 if withInfo is None:
1533 self.__I = (str(withInfo),)*len(self.__V)
1534 elif isinstance(onVariable, str):
1535 self.__V = (onVariable,)
1536 if withInfo is None:
1537 self.__I = (onVariable,)
1539 self.__I = (str(withInfo),)
1541 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1543 if asObsObject is not None:
1544 self.__O = asObsObject
1546 __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
1547 __Function = Observer2Func(__FunctionText)
1548 self.__O = __Function.getfunc()
1550 for k in range(len(self.__V)):
1553 if ename not in withAlgo:
1554 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1556 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1559 "x.__repr__() <==> repr(x)"
1560 return repr(self.__V)+"\n"+repr(self.__O)
1563 "x.__str__() <==> str(x)"
1564 return str(self.__V)+"\n"+str(self.__O)
1566 # ==============================================================================
1567 class UserScript(object):
1569 Classe générale d'interface de type texte de script utilisateur
1572 name = "GenericUserScript",
1579 self.__name = str(name)
1581 if asString is not None:
1583 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1584 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1585 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1586 self.__F = Templates.ObserverTemplates[asTemplate]
1587 elif asScript is not None:
1588 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1593 "x.__repr__() <==> repr(x)"
1594 return repr(self.__F)
1597 "x.__str__() <==> str(x)"
1598 return str(self.__F)
1600 # ==============================================================================
1601 class ExternalParameters(object):
1603 Classe générale d'interface de type texte de script utilisateur
1606 name = "GenericExternalParameters",
1612 self.__name = str(name)
1615 self.updateParameters( asDict, asScript )
1617 def updateParameters(self,
1621 "Mise a jour des parametres"
1622 if asDict is None and asScript is not None:
1623 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1627 if __Dict is not None:
1628 self.__P.update( dict(__Dict) )
1630 def get(self, key = None):
1632 return self.__P[key]
1634 return list(self.__P.keys())
1637 return list(self.__P.keys())
1639 def pop(self, k, d):
1640 return self.__P.pop(k, d)
1643 return self.__P.items()
1645 def __contains__(self, key=None):
1646 "D.__contains__(k) -> True if D has a key k, else False"
1647 return key in self.__P
1649 # ==============================================================================
1650 class State(object):
1652 Classe générale d'interface de type état
1655 name = "GenericVector",
1657 asPersistentVector = None,
1663 toBeChecked = False,
1666 Permet de définir un vecteur :
1667 - asVector : entrée des données, comme un vecteur compatible avec le
1668 constructeur de numpy.matrix, ou "True" si entrée par script.
1669 - asPersistentVector : entrée des données, comme une série de vecteurs
1670 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1671 type Persistence, ou "True" si entrée par script.
1672 - asScript : si un script valide est donné contenant une variable
1673 nommée "name", la variable est de type "asVector" (par défaut) ou
1674 "asPersistentVector" selon que l'une de ces variables est placée à
1676 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1677 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1678 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1679 nommée "name"), on récupère les colonnes et on les range ligne après
1680 ligne (colMajor=False, par défaut) ou colonne après colonne
1681 (colMajor=True). La variable résultante est de type "asVector" (par
1682 défaut) ou "asPersistentVector" selon que l'une de ces variables est
1685 self.__name = str(name)
1686 self.__check = bool(toBeChecked)
1690 self.__is_vector = False
1691 self.__is_series = False
1693 if asScript is not None:
1694 __Vector, __Series = None, None
1695 if asPersistentVector:
1696 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1698 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1699 elif asDataFile is not None:
1700 __Vector, __Series = None, None
1701 if asPersistentVector:
1702 if colNames is not None:
1703 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1705 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1706 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1707 __Series = numpy.transpose(__Series)
1708 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1709 __Series = numpy.transpose(__Series)
1711 if colNames is not None:
1712 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1714 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1716 __Vector = numpy.ravel(__Vector, order = "F")
1718 __Vector = numpy.ravel(__Vector, order = "C")
1720 __Vector, __Series = asVector, asPersistentVector
1722 if __Vector is not None:
1723 self.__is_vector = True
1724 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1725 self.shape = self.__V.shape
1726 self.size = self.__V.size
1727 elif __Series is not None:
1728 self.__is_series = True
1729 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1730 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1731 if isinstance(__Series, str): __Series = eval(__Series)
1732 for member in __Series:
1733 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1736 if isinstance(self.__V.shape, (tuple, list)):
1737 self.shape = self.__V.shape
1739 self.shape = self.__V.shape()
1740 if len(self.shape) == 1:
1741 self.shape = (self.shape[0],1)
1742 self.size = self.shape[0] * self.shape[1]
1744 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1746 if scheduledBy is not None:
1747 self.__T = scheduledBy
1749 def getO(self, withScheduler=False):
1751 return self.__V, self.__T
1752 elif self.__T is None:
1758 "Vérification du type interne"
1759 return self.__is_vector
1762 "Vérification du type interne"
1763 return self.__is_series
1766 "x.__repr__() <==> repr(x)"
1767 return repr(self.__V)
1770 "x.__str__() <==> str(x)"
1771 return str(self.__V)
1773 # ==============================================================================
1774 class Covariance(object):
1776 Classe générale d'interface de type covariance
1779 name = "GenericCovariance",
1780 asCovariance = None,
1781 asEyeByScalar = None,
1782 asEyeByVector = None,
1785 toBeChecked = False,
1788 Permet de définir une covariance :
1789 - asCovariance : entrée des données, comme une matrice compatible avec
1790 le constructeur de numpy.matrix
1791 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1792 multiplicatif d'une matrice de corrélation identité, aucune matrice
1793 n'étant donc explicitement à donner
1794 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1795 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1796 n'étant donc explicitement à donner
1797 - asCovObject : entrée des données comme un objet python, qui a les
1798 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1799 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1800 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1801 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1802 pleine doit être vérifié
1804 self.__name = str(name)
1805 self.__check = bool(toBeChecked)
1808 self.__is_scalar = False
1809 self.__is_vector = False
1810 self.__is_matrix = False
1811 self.__is_object = False
1813 if asScript is not None:
1814 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1816 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1818 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1820 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1822 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1824 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1826 if __Scalar is not None:
1827 if isinstance(__Scalar, str):
1828 __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
1829 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1830 if numpy.array(__Scalar).size != 1:
1831 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1832 self.__is_scalar = True
1833 self.__C = numpy.abs( float(__Scalar) )
1836 elif __Vector is not None:
1837 if isinstance(__Vector, str):
1838 __Vector = __Vector.replace(";"," ").replace(","," ").split()
1839 self.__is_vector = True
1840 self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
1841 self.shape = (self.__C.size,self.__C.size)
1842 self.size = self.__C.size**2
1843 elif __Matrix is not None:
1844 self.__is_matrix = True
1845 self.__C = numpy.matrix( __Matrix, float )
1846 self.shape = self.__C.shape
1847 self.size = self.__C.size
1848 elif __Object is not None:
1849 self.__is_object = True
1851 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1852 if not hasattr(self.__C,at):
1853 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1854 if hasattr(self.__C,"shape"):
1855 self.shape = self.__C.shape
1858 if hasattr(self.__C,"size"):
1859 self.size = self.__C.size
1864 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1868 def __validate(self):
1870 if self.__C is None:
1871 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1872 if self.ismatrix() and min(self.shape) != max(self.shape):
1873 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1874 if self.isobject() and min(self.shape) != max(self.shape):
1875 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1876 if self.isscalar() and self.__C <= 0:
1877 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1878 if self.isvector() and (self.__C <= 0).any():
1879 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1880 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1882 L = numpy.linalg.cholesky( self.__C )
1884 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1885 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1887 L = self.__C.cholesky()
1889 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1892 "Vérification du type interne"
1893 return self.__is_scalar
1896 "Vérification du type interne"
1897 return self.__is_vector
1900 "Vérification du type interne"
1901 return self.__is_matrix
1904 "Vérification du type interne"
1905 return self.__is_object
1910 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1911 elif self.isvector():
1912 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1913 elif self.isscalar():
1914 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1915 elif self.isobject() and hasattr(self.__C,"getI"):
1916 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1918 return None # Indispensable
1923 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1924 elif self.isvector():
1925 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1926 elif self.isscalar():
1927 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1928 elif self.isobject() and hasattr(self.__C,"getT"):
1929 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1931 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1934 "Décomposition de Cholesky"
1936 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1937 elif self.isvector():
1938 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1939 elif self.isscalar():
1940 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1941 elif self.isobject() and hasattr(self.__C,"cholesky"):
1942 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1944 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1946 def choleskyI(self):
1947 "Inversion de la décomposition de Cholesky"
1949 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1950 elif self.isvector():
1951 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1952 elif self.isscalar():
1953 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1954 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1955 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1957 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1960 "Racine carrée matricielle"
1963 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1964 elif self.isvector():
1965 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1966 elif self.isscalar():
1967 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1968 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1969 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1971 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1974 "Inversion de la racine carrée matricielle"
1977 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1978 elif self.isvector():
1979 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1980 elif self.isscalar():
1981 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1982 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1983 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1985 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
1987 def diag(self, msize=None):
1988 "Diagonale de la matrice"
1990 return numpy.diag(self.__C)
1991 elif self.isvector():
1993 elif self.isscalar():
1995 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1997 return self.__C * numpy.ones(int(msize))
1998 elif self.isobject() and hasattr(self.__C,"diag"):
1999 return self.__C.diag()
2001 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
2003 def trace(self, msize=None):
2004 "Trace de la matrice"
2006 return numpy.trace(self.__C)
2007 elif self.isvector():
2008 return float(numpy.sum(self.__C))
2009 elif self.isscalar():
2011 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2013 return self.__C * int(msize)
2014 elif self.isobject():
2015 return self.__C.trace()
2017 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2019 def asfullmatrix(self, msize=None):
2022 return numpy.asarray(self.__C)
2023 elif self.isvector():
2024 return numpy.asarray( numpy.diag(self.__C), float )
2025 elif self.isscalar():
2027 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2029 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
2030 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2031 return self.__C.asfullmatrix()
2033 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2035 def assparsematrix(self):
2043 "x.__repr__() <==> repr(x)"
2044 return repr(self.__C)
2047 "x.__str__() <==> str(x)"
2048 return str(self.__C)
2050 def __add__(self, other):
2051 "x.__add__(y) <==> x+y"
2052 if self.ismatrix() or self.isobject():
2053 return self.__C + numpy.asmatrix(other)
2054 elif self.isvector() or self.isscalar():
2055 _A = numpy.asarray(other)
2056 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2057 return numpy.asmatrix(_A)
2059 def __radd__(self, other):
2060 "x.__radd__(y) <==> y+x"
2061 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2063 def __sub__(self, other):
2064 "x.__sub__(y) <==> x-y"
2065 if self.ismatrix() or self.isobject():
2066 return self.__C - numpy.asmatrix(other)
2067 elif self.isvector() or self.isscalar():
2068 _A = numpy.asarray(other)
2069 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2070 return numpy.asmatrix(_A)
2072 def __rsub__(self, other):
2073 "x.__rsub__(y) <==> y-x"
2074 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2077 "x.__neg__() <==> -x"
2080 def __matmul__(self, other):
2081 "x.__mul__(y) <==> x@y"
2082 if self.ismatrix() and isinstance(other, (int, float)):
2083 return numpy.asarray(self.__C) * other
2084 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2085 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2086 return numpy.ravel(self.__C @ numpy.ravel(other))
2087 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2088 return numpy.asarray(self.__C) @ numpy.asarray(other)
2090 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2091 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2092 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2093 return numpy.ravel(self.__C) * numpy.ravel(other)
2094 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrice
2095 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2097 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2098 elif self.isscalar() and isinstance(other,numpy.matrix):
2099 return numpy.asarray(self.__C * other)
2100 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2101 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2102 return self.__C * numpy.ravel(other)
2104 return self.__C * numpy.asarray(other)
2105 elif self.isobject():
2106 return self.__C.__matmul__(other)
2108 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2110 def __mul__(self, other):
2111 "x.__mul__(y) <==> x*y"
2112 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2113 return self.__C * other
2114 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2115 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2116 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2117 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2118 return self.__C * numpy.asmatrix(other)
2120 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2121 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2122 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2123 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2124 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2125 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2127 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2128 elif self.isscalar() and isinstance(other,numpy.matrix):
2129 return self.__C * other
2130 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2131 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2132 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2134 return self.__C * numpy.asmatrix(other)
2135 elif self.isobject():
2136 return self.__C.__mul__(other)
2138 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2140 def __rmatmul__(self, other):
2141 "x.__rmul__(y) <==> y@x"
2142 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2143 return other * self.__C
2144 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2145 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2146 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2147 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2148 return numpy.asmatrix(other) * self.__C
2150 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2151 elif self.isvector() and isinstance(other,numpy.matrix):
2152 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2153 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2154 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2155 return numpy.asmatrix(numpy.array(other) * self.__C)
2157 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2158 elif self.isscalar() and isinstance(other,numpy.matrix):
2159 return other * self.__C
2160 elif self.isobject():
2161 return self.__C.__rmatmul__(other)
2163 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2165 def __rmul__(self, other):
2166 "x.__rmul__(y) <==> y*x"
2167 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2168 return other * self.__C
2169 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2170 if numpy.ravel(other).size == self.shape[1]: # Vecteur
2171 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2172 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
2173 return numpy.asmatrix(other) * self.__C
2175 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2176 elif self.isvector() and isinstance(other,numpy.matrix):
2177 if numpy.ravel(other).size == self.shape[0]: # Vecteur
2178 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2179 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
2180 return numpy.asmatrix(numpy.array(other) * self.__C)
2182 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2183 elif self.isscalar() and isinstance(other,numpy.matrix):
2184 return other * self.__C
2185 elif self.isobject():
2186 return self.__C.__rmul__(other)
2188 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2191 "x.__len__() <==> len(x)"
2192 return self.shape[0]
2194 # ==============================================================================
2195 class Observer2Func(object):
2197 Création d'une fonction d'observateur a partir de son texte
2199 def __init__(self, corps=""):
2200 self.__corps = corps
2201 def func(self,var,info):
2202 "Fonction d'observation"
2205 "Restitution du pointeur de fonction dans l'objet"
2208 # ==============================================================================
2209 class CaseLogger(object):
2211 Conservation des commandes de création d'un cas
2213 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2214 self.__name = str(__name)
2215 self.__objname = str(__objname)
2216 self.__logSerie = []
2217 self.__switchoff = False
2219 "TUI" :Interfaces._TUIViewer,
2220 "SCD" :Interfaces._SCDViewer,
2221 "YACS":Interfaces._YACSViewer,
2222 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2223 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2224 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
2227 "TUI" :Interfaces._TUIViewer,
2228 "COM" :Interfaces._COMViewer,
2230 if __addViewers is not None:
2231 self.__viewers.update(dict(__addViewers))
2232 if __addLoaders is not None:
2233 self.__loaders.update(dict(__addLoaders))
2235 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2236 "Enregistrement d'une commande individuelle"
2237 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2238 if "self" in __keys: __keys.remove("self")
2239 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2241 self.__switchoff = True
2243 self.__switchoff = False
2245 def dump(self, __filename=None, __format="TUI", __upa=""):
2246 "Restitution normalisée des commandes"
2247 if __format in self.__viewers:
2248 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2250 raise ValueError("Dumping as \"%s\" is not available"%__format)
2251 return __formater.dump(__filename, __upa)
2253 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2254 "Chargement normalisé des commandes"
2255 if __format in self.__loaders:
2256 __formater = self.__loaders[__format]()
2258 raise ValueError("Loading as \"%s\" is not available"%__format)
2259 return __formater.load(__filename, __content, __object)
2261 # ==============================================================================
2264 _extraArguments = None,
2265 _sFunction = lambda x: x,
2270 Pour une liste ordonnée de vecteurs en entrée, renvoie en sortie la liste
2271 correspondante de valeurs de la fonction en argument
2273 # Vérifications et définitions initiales
2274 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2275 if not PlatformInfo.isIterable( __xserie ):
2276 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
2278 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2281 __mpWorkers = int(_mpWorkers)
2283 import multiprocessing
2294 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2295 import multiprocessing
2296 with multiprocessing.Pool(__mpWorkers) as pool:
2297 __multiHX = pool.map( _sFunction, _jobs )
2300 # logging.debug("MULTF Internal multiprocessing calculation end")
2302 # logging.debug("MULTF Internal monoprocessing calculation begin")
2304 if _extraArguments is None:
2305 for __xvalue in __xserie:
2306 __multiHX.append( _sFunction( __xvalue ) )
2307 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2308 for __xvalue in __xserie:
2309 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2310 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2311 for __xvalue in __xserie:
2312 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2314 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2315 # logging.debug("MULTF Internal monoprocessing calculation end")
2317 # logging.debug("MULTF Internal multifonction calculations end")
2320 # ==============================================================================
2321 def CostFunction3D(_x,
2322 _Hm = None, # Pour simuler Hm(x) : HO["Direct"].appliedTo
2323 _HmX = None, # Simulation déjà faite de Hm(x)
2324 _arg = None, # Arguments supplementaires pour Hm, sous la forme d'un tuple
2329 _SIV = False, # A résorber pour la 8.0
2330 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2331 _nPS = 0, # nbPreviousSteps
2332 _QM = "DA", # QualityMeasure
2333 _SSV = {}, # Entrée et/ou sortie : self.StoredVariables
2334 _fRt = False, # Restitue ou pas la sortie étendue
2335 _sSc = True, # Stocke ou pas les SSC
2338 Fonction-coût générale utile pour les algorithmes statiques/3D : 3DVAR, BLUE
2339 et dérivés, Kalman et dérivés, LeastSquares, SamplingTest, PSO, SA, Tabu,
2340 DFO, QuantileRegression
2346 for k in ["CostFunctionJ",
2352 "SimulatedObservationAtCurrentOptimum",
2353 "SimulatedObservationAtCurrentState",
2357 if hasattr(_SSV[k],"store"):
2358 _SSV[k].append = _SSV[k].store # Pour utiliser "append" au lieu de "store"
2360 _X = numpy.asmatrix(numpy.ravel( _x )).T
2361 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2362 _SSV["CurrentState"].append( _X )
2364 if _HmX is not None:
2368 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2372 _HX = _Hm( _X, *_arg )
2373 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2375 if "SimulatedObservationAtCurrentState" in _SSC or \
2376 "SimulatedObservationAtCurrentOptimum" in _SSC:
2377 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
2379 if numpy.any(numpy.isnan(_HX)):
2380 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2382 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
2383 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2384 if _BI is None or _RI is None:
2385 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2386 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2387 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2388 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2389 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2391 raise ValueError("Observation error covariance matrix has to be properly defined!")
2393 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2394 elif _QM in ["LeastSquares", "LS", "L2"]:
2396 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2397 elif _QM in ["AbsoluteValue", "L1"]:
2399 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2400 elif _QM in ["MaximumError", "ME"]:
2402 Jo = numpy.max( numpy.abs(_Y - _HX) )
2403 elif _QM in ["QR", "Null"]:
2407 raise ValueError("Unknown asked quality measure!")
2409 J = float( Jb ) + float( Jo )
2412 _SSV["CostFunctionJb"].append( Jb )
2413 _SSV["CostFunctionJo"].append( Jo )
2414 _SSV["CostFunctionJ" ].append( J )
2416 if "IndexOfOptimum" in _SSC or \
2417 "CurrentOptimum" in _SSC or \
2418 "SimulatedObservationAtCurrentOptimum" in _SSC:
2419 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2420 if "IndexOfOptimum" in _SSC:
2421 _SSV["IndexOfOptimum"].append( IndexMin )
2422 if "CurrentOptimum" in _SSC:
2423 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2424 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2425 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2430 if _QM in ["QR"]: # Pour le QuantileRegression
2435 # ==============================================================================
if __name__ == "__main__":
    # Self-test banner: only printed when the module is executed directly.
    print('\n AUTODIAGNOSTIC\n')