1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2021 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
35 from functools import partial
36 from daCore import Persistence, PlatformInfo, Interfaces
37 from daCore import Templates
39 # ==============================================================================
# NOTE(review): this region is an elided, line-numbered listing — the leading
# integers are part of the captured text and many original lines are missing.
# Code is reproduced verbatim; only commentary is added.
#
# CacheManager memoizes operator evaluations: it keeps a bounded list
# (self.__listOPCV) of tuples (point, H(point), norm, operator name) so that
# re-applying an operator at a numerically identical point reuses the stored
# result instead of recomputing it. "lenght" spelling is historical — kept,
# since it is part of the public keyword interface.
40 class CacheManager(object):
42 Classe générale de gestion d'un cache de calculs
# Constructor keyword defaults (signature partially elided): relative
# redundancy tolerance, and cache length (-1 = auto-size on first store).
45 toleranceInRedundancy = 1.e-18,
46 lenghtOfRedundancy = -1,
49 Les caractéristiques de tolérance peuvent être modifiées à la création.
51 self.__tolerBP = float(toleranceInRedundancy)
52 self.__lenghtOR = int(lenghtOfRedundancy)
53 self.__initlnOR = self.__lenghtOR
# wasCalculatedIn: scan the cache from most recent to oldest; an entry matches
# when the operator name and vector size agree and the distance to the stored
# point is below a tolerance scaled by the stored norm. Branch bodies
# (continue/break, return values) are elided in this capture.
63 def wasCalculatedIn(self, xValue, oName="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
68 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
69 if not hasattr(xValue, 'size'):
71 elif (str(oName) != self.__listOPCV[i][3]):
73 elif (xValue.size != self.__listOPCV[i][0].size):
# Cheap first-component pre-test before the full norm comparison below.
75 elif (numpy.ravel(xValue)[0] - self.__listOPCV[i][0][0]) > (self.__tolerBP * self.__listOPCV[i][2] / self.__listOPCV[i][0].size):
77 elif numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < (self.__tolerBP * self.__listOPCV[i][2]):
79 __HxV = self.__listOPCV[i][1]
# storeValueInX: append a new (x, Hx) pair, auto-sizing the cache on first use
# and enlarging it for each new operator name; oldest entries are evicted.
83 def storeValueInX(self, xValue, HxValue, oName="" ):
84 "Stocke pour un opérateur o un calcul Hx correspondant à la valeur x"
85 if self.__lenghtOR < 0:
86 self.__lenghtOR = 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
87 self.__initlnOR = self.__lenghtOR
88 self.__seenNames.append(str(oName))
89 if str(oName) not in self.__seenNames: # Etend la liste si nouveau
90 self.__lenghtOR += 2 * min(xValue.size, 50) + 2 # 2 * xValue.size + 2
# NOTE(review): `+=` here vs `=` in the first branch above looks suspicious
# (initlnOR grows by the whole new length, not the increment) — confirm intent.
91 self.__initlnOR += self.__lenghtOR
92 self.__seenNames.append(str(oName))
# Evict oldest cached entries once the bound is exceeded (FIFO).
93 while len(self.__listOPCV) > self.__lenghtOR:
94 self.__listOPCV.pop(0)
95 self.__listOPCV.append( (
96 copy.copy(numpy.ravel(xValue)), # 0 Previous point
97 copy.copy(HxValue), # 1 Previous value
98 numpy.linalg.norm(xValue), # 2 Norm
99 str(oName), # 3 Operator name
# disable()/enable() bodies (elided headers): toggle cache usage, restoring
# the initial cache length on re-enable.
104 self.__initlnOR = self.__lenghtOR
106 self.__enabled = False
110 self.__lenghtOR = self.__initlnOR
111 self.__enabled = True
113 # ==============================================================================
# NOTE(review): elided numbered listing — code reproduced verbatim, comments
# added only. Operator wraps either a user function (mono- or multi-valued)
# or a matrix, counts its evaluations, and routes repeated evaluations
# through the shared CacheManager (Operator.CM, defined on elided lines).
114 class Operator(object):
116 Classe générale d'interface de type opérateur simple
124 name = "GenericOperator",
127 avoidingRedundancy = True,
128 inputAsMultiFunction = False,
129 enableMultiProcess = False,
130 extraArguments = None,
133 On construit un objet de ce type en fournissant, à l'aide de l'un des
134 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
137 - name : nom d'opérateur
138 - fromMethod : argument de type fonction Python
139 - fromMatrix : argument adapté au constructeur numpy.matrix
140 - avoidingRedundancy : booléen évitant (ou pas) les calculs redondants
141 - inputAsMultiFunction : booléen indiquant une fonction explicitement
142 définie (ou pas) en multi-fonction
143 - extraArguments : arguments supplémentaires passés à la fonction de
144 base et ses dérivées (tuple ou dictionnaire)
146 self.__name = str(name)
147 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
148 self.__AvoidRC = bool( avoidingRedundancy )
149 self.__inputAsMF = bool( inputAsMultiFunction )
150 self.__mpEnabled = bool( enableMultiProcess )
151 self.__extraArgs = extraArguments
# Dispatch on the three construction modes: native multi-function, plain
# function wrapped as a multi-function via functools.partial, or matrix.
152 if fromMethod is not None and self.__inputAsMF:
153 self.__Method = fromMethod # logtimer(fromMethod)
155 self.__Type = "Method"
156 elif fromMethod is not None and not self.__inputAsMF:
157 self.__Method = partial( MultiFonction, _sFunction=fromMethod, _mpEnabled=self.__mpEnabled)
159 self.__Type = "Method"
160 elif fromMatrix is not None:
# NOTE(review): numpy.float was removed in NumPy 1.24 (deprecated 1.20);
# this line needs `float` on modern NumPy. numpy.matrix is also deprecated.
162 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
163 self.__Type = "Matrix"
# Class-level toggles for the shared redundancy cache. NOTE(review): the
# enable... variant also shows CM.disable() here — likely a branch body from
# an elided conditional; verify against the full source before relying on it.
169 def disableAvoidingRedundancy(self):
171 Operator.CM.disable()
173 def enableAvoidingRedundancy(self):
178 Operator.CM.disable()
# appliedTo: evaluate the operator on one value or a series. Pre-computed
# HValue entries are stored directly; otherwise cached results are reused and
# only the cache misses (_xserie) are sent to the method in one batch call.
184 def appliedTo(self, xValue, HValue = None, argsAsSerie = False, returnSerieAsArrayMatrix = False):
186 Permet de restituer le résultat de l'application de l'opérateur à une
187 série d'arguments xValue. Cette méthode se contente d'appliquer, chaque
188 argument devant a priori être du bon type.
190 - les arguments par série sont :
191 - xValue : argument adapté pour appliquer l'opérateur
192 - HValue : valeur précalculée de l'opérateur en ce point
193 - argsAsSerie : indique si les arguments sont une mono ou multi-valeur
200 if HValue is not None:
204 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
206 if _HValue is not None:
207 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
209 for i in range(len(_HValue)):
210 _HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
212 Operator.CM.storeValueInX(_xValue[i],_HxValue[-1],self.__name)
217 for i, xv in enumerate(_xValue):
219 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv,self.__name)
221 __alreadyCalculated = False
223 if __alreadyCalculated:
224 self.__addOneCacheCall()
227 if self.__Matrix is not None:
228 self.__addOneMatrixCall()
229 _xv = numpy.ravel(xv).reshape((-1,1))
230 _hv = self.__Matrix * _xv
232 self.__addOneMethodCall()
236 _HxValue.append( _hv )
# Batch evaluation of all cache misses through the (multi-)function.
238 if len(_xserie)>0 and self.__Matrix is None:
239 if self.__extraArgs is None:
240 _hserie = self.__Method( _xserie ) # MF computation
242 _hserie = self.__Method( _xserie, self.__extraArgs ) # MF computation
243 if not hasattr(_hserie, "pop"):
244 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
250 Operator.CM.storeValueInX(_xv,_hv,self.__name)
# Optionally repack the series of results as one array, one column per result.
252 if returnSerieAsArrayMatrix:
253 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
255 if argsAsSerie: return _HxValue
256 else: return _HxValue[-1]
# appliedControledFormTo: evaluate on (x, u) pairs; u is dropped when None.
258 def appliedControledFormTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
260 Permet de restituer le résultat de l'application de l'opérateur à des
261 paires (xValue, uValue). Cette méthode se contente d'appliquer, son
262 argument devant a priori être du bon type. Si la uValue est None,
263 on suppose que l'opérateur ne s'applique qu'à xValue.
265 - paires : les arguments par paire sont :
266 - xValue : argument X adapté pour appliquer l'opérateur
267 - uValue : argument U adapté pour appliquer l'opérateur
268 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
270 if argsAsSerie: _xuValue = paires
271 else: _xuValue = (paires,)
272 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
# Matrix case: linear operator ignores the control u entirely.
274 if self.__Matrix is not None:
276 for paire in _xuValue:
277 _xValue, _uValue = paire
278 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
279 self.__addOneMatrixCall()
280 _HxValue.append( self.__Matrix * _xValue )
# Function case: keep the pair when u is given, else pass x alone.
283 for paire in _xuValue:
284 _xValue, _uValue = paire
285 if _uValue is not None:
286 _xuArgs.append( paire )
288 _xuArgs.append( _xValue )
289 self.__addOneMethodCall( len(_xuArgs) )
290 if self.__extraArgs is None:
291 _HxValue = self.__Method( _xuArgs ) # MF computation
293 _HxValue = self.__Method( _xuArgs, self.__extraArgs ) # MF computation
295 if returnSerieAsArrayMatrix:
296 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
298 if argsAsSerie: return _HxValue
299 else: return _HxValue[-1]
# appliedInXTo: evaluate the operator linearized at xNominal on xValue; for a
# matrix operator the nominal point is irrelevant and ignored.
301 def appliedInXTo(self, paires, argsAsSerie = False, returnSerieAsArrayMatrix = False):
303 Permet de restituer le résultat de l'application de l'opérateur à une
304 série d'arguments xValue, sachant que l'opérateur est valable en
305 xNominal. Cette méthode se contente d'appliquer, son argument devant a
306 priori être du bon type. Si l'opérateur est linéaire car c'est une
307 matrice, alors il est valable en tout point nominal et xNominal peut
308 être quelconque. Il n'y a qu'une seule paire par défaut, et argsAsSerie
309 permet d'indiquer que l'argument est multi-paires.
311 - paires : les arguments par paire sont :
312 - xNominal : série d'arguments permettant de donner le point où
313 l'opérateur est construit pour être ensuite appliqué
314 - xValue : série d'arguments adaptés pour appliquer l'opérateur
315 - argsAsSerie : indique si l'argument est une mono ou multi-valeur
317 if argsAsSerie: _nxValue = paires
318 else: _nxValue = (paires,)
319 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
321 if self.__Matrix is not None:
323 for paire in _nxValue:
324 _xNominal, _xValue = paire
325 _xValue = numpy.matrix(numpy.ravel(_xValue)).T
326 self.__addOneMatrixCall()
327 _HxValue.append( self.__Matrix * _xValue )
329 self.__addOneMethodCall( len(_nxValue) )
330 if self.__extraArgs is None:
331 _HxValue = self.__Method( _nxValue ) # MF computation
333 _HxValue = self.__Method( _nxValue, self.__extraArgs ) # MF computation
335 if returnSerieAsArrayMatrix:
336 _HxValue = numpy.stack([numpy.ravel(_hv) for _hv in _HxValue], axis=1)
338 if argsAsSerie: return _HxValue
339 else: return _HxValue[-1]
# asMatrix: return the operator as a matrix; a function-backed operator must
# be given an operating point (the sentinel string "UnknownVoidValue" marks
# "no point given" because None is a legal point value).
341 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
343 Permet de renvoyer l'opérateur sous la forme d'une matrice
345 if self.__Matrix is not None:
346 self.__addOneMatrixCall()
347 mValue = [self.__Matrix,]
348 elif not isinstance(ValueForMethodForm,str) or ValueForMethodForm != "UnknownVoidValue": # Ne pas utiliser "None"
351 self.__addOneMethodCall( len(ValueForMethodForm) )
352 for _vfmf in ValueForMethodForm:
353 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
355 self.__addOneMethodCall()
356 mValue = self.__Method(((ValueForMethodForm, None),))
358 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
360 if argsAsSerie: return mValue
361 else: return mValue[-1]
# shape accessor (def line elided): only available for matrix operators.
365 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
366 la forme d'une matrice
368 if self.__Matrix is not None:
369 return self.__Matrix.shape
371 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: returns the call counters; `which` indexes into the tuple of
# (local total, local matrix, local method, local cached, global total,
# global matrix, global method, global cached) — tuple opening line elided.
373 def nbcalls(self, which=None):
375 Renvoie les nombres d'évaluations de l'opérateur
378 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
379 self.__NbCallsAsMatrix,
380 self.__NbCallsAsMethod,
381 self.__NbCallsOfCached,
382 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
383 Operator.NbCallsAsMatrix,
384 Operator.NbCallsAsMethod,
385 Operator.NbCallsOfCached,
387 if which is None: return __nbcalls
388 else: return __nbcalls[which]
# Counter helpers: each bumps the per-instance counter and the class-wide
# (all-operators) counter in lockstep.
390 def __addOneMatrixCall(self):
391 "Comptabilise un appel"
392 self.__NbCallsAsMatrix += 1 # local count
393 Operator.NbCallsAsMatrix += 1 # global count
395 def __addOneMethodCall(self, nb = 1):
396 "Comptabilise un appel"
397 self.__NbCallsAsMethod += nb # local count
398 Operator.NbCallsAsMethod += nb # global count
400 def __addOneCacheCall(self):
401 "Comptabilise un appel"
402 self.__NbCallsOfCached += 1 # local count
403 Operator.NbCallsOfCached += 1 # global count
405 # ==============================================================================
# NOTE(review): elided numbered listing — code reproduced verbatim, comments
# added only. FullOperator normalizes the many ways a user can supply an
# observation/evolution operator (matrix, one function + finite differences,
# or an explicit Direct/Tangent/Adjoint triple, possibly loaded from a
# script) into three Operator instances in self.__FO.
406 class FullOperator(object):
408 Classe générale d'interface de type opérateur complet
409 (Direct, Linéaire Tangent, Adjoint)
412 name = "GenericFullOperator",
414 asOneFunction = None, # 1 Fonction
415 asThreeFunctions = None, # 3 Fonctions in a dictionary
416 asScript = None, # 1 or 3 Fonction(s) by script
417 asDict = None, # Parameters
419 extraArguments = None,
421 inputAsMF = False,# Fonction(s) as Multi-Functions
426 self.__name = str(name)
427 self.__check = bool(toBeChecked)
428 self.__extraArgs = extraArguments
# Normalize the multiprocessing flags: derivatives-MP takes priority and
# excludes evaluation-MP; both default to off.
433 if (asDict is not None) and isinstance(asDict, dict):
434 __Parameters.update( asDict )
435 # Priority to EnableMultiProcessingInDerivatives=True
436 if "EnableMultiProcessing" in __Parameters and __Parameters["EnableMultiProcessing"]:
437 __Parameters["EnableMultiProcessingInDerivatives"] = True
438 __Parameters["EnableMultiProcessingInEvaluation"] = False
439 if "EnableMultiProcessingInDerivatives" not in __Parameters:
440 __Parameters["EnableMultiProcessingInDerivatives"] = False
441 if __Parameters["EnableMultiProcessingInDerivatives"]:
442 __Parameters["EnableMultiProcessingInEvaluation"] = False
443 if "EnableMultiProcessingInEvaluation" not in __Parameters:
444 __Parameters["EnableMultiProcessingInEvaluation"] = False
445 if "withIncrement" in __Parameters: # Temporary (legacy keyword)
446 __Parameters["DifferentialIncrement"] = __Parameters["withIncrement"]
# Script-based input: load a matrix, or one function (approximate
# derivatives), or the full three-function set, from the given script.
448 if asScript is not None:
449 __Matrix, __Function = None, None
451 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
453 __Function = { "Direct":Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ) }
454 __Function.update({"useApproximatedDerivatives":True})
455 __Function.update(__Parameters)
456 elif asThreeFunctions:
458 "Direct" :Interfaces.ImportFromScript(asScript).getvalue( "DirectOperator" ),
459 "Tangent":Interfaces.ImportFromScript(asScript).getvalue( "TangentOperator" ),
460 "Adjoint":Interfaces.ImportFromScript(asScript).getvalue( "AdjointOperator" ),
462 __Function.update(__Parameters)
# Direct (non-script) input: validate the dictionary shapes for the
# one-function and three-function cases.
465 if asOneFunction is not None:
466 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
467 if asOneFunction["Direct"] is not None:
468 __Function = asOneFunction
470 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
472 __Function = { "Direct":asOneFunction }
473 __Function.update({"useApproximatedDerivatives":True})
474 __Function.update(__Parameters)
475 elif asThreeFunctions is not None:
476 if isinstance(asThreeFunctions, dict) and \
477 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
478 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
479 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
480 __Function = asThreeFunctions
481 elif isinstance(asThreeFunctions, dict) and \
482 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
483 __Function = asThreeFunctions
484 __Function.update({"useApproximatedDerivatives":True})
486 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
487 if "Direct" not in asThreeFunctions:
488 __Function["Direct"] = asThreeFunctions["Tangent"]
489 __Function.update(__Parameters)
# Dead Python-2 compatibility check kept commented out in the original.
493 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
494 # for k in ("Direct", "Tangent", "Adjoint"):
495 # if k in __Function and hasattr(__Function[k],"__class__"):
496 # if type(__Function[k]) is type(self.__init__):
497 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# A bare appliedInX value is wrapped as the background evaluation {"HXb": ...}.
499 if appliedInX is not None and isinstance(appliedInX, dict):
500 __appliedInX = appliedInX
501 elif appliedInX is not None:
502 __appliedInX = {"HXb":appliedInX}
506 if scheduledBy is not None:
507 self.__T = scheduledBy
# Case 1: single Direct function + finite-difference approximation of the
# tangent and adjoint, delegated to NumericObjects.FDApproximation.
509 if isinstance(__Function, dict) and \
510 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
511 ("Direct" in __Function) and (__Function["Direct"] is not None):
512 if "CenteredFiniteDifference" not in __Function: __Function["CenteredFiniteDifference"] = False
513 if "DifferentialIncrement" not in __Function: __Function["DifferentialIncrement"] = 0.01
514 if "withdX" not in __Function: __Function["withdX"] = None
515 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = avoidRC
516 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
517 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
518 if "NumberOfProcesses" not in __Function: __Function["NumberOfProcesses"] = None
519 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
520 from daCore import NumericObjects
521 FDA = NumericObjects.FDApproximation(
523 Function = __Function["Direct"],
524 centeredDF = __Function["CenteredFiniteDifference"],
525 increment = __Function["DifferentialIncrement"],
526 dX = __Function["withdX"],
527 extraArguments = self.__extraArgs,
528 avoidingRedundancy = __Function["withAvoidingRedundancy"],
529 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
530 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
531 mpEnabled = __Function["EnableMultiProcessingInDerivatives"],
532 mpWorkers = __Function["NumberOfProcesses"],
533 mfEnabled = __Function["withmfEnabled"],
535 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
536 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
537 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 2: explicit Direct/Tangent/Adjoint functions supplied by the user.
538 elif isinstance(__Function, dict) and \
539 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
540 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
541 self.__FO["Direct"] = Operator( name = self.__name, fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
542 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
543 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, extraArguments = self.__extraArgs )
# Case 3: linear operator given as a matrix; adjoint is its transpose.
# NOTE(review): numpy.float was removed in NumPy 1.24 — needs `float` on
# modern NumPy (same remark for the AppliedInX conversions below).
544 elif asMatrix is not None:
545 __matrice = numpy.matrix( __Matrix, numpy.float )
546 self.__FO["Direct"] = Operator( name = self.__name, fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF, enableMultiProcess = __Parameters["EnableMultiProcessingInEvaluation"] )
547 self.__FO["Tangent"] = Operator( name = self.__name+"Tangent", fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
548 self.__FO["Adjoint"] = Operator( name = self.__name+"Adjoint", fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
551 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a matrix, a Direct operator for approximate derivatives or a Tangent/Adjoint operators pair. Please check your operator input."%self.__name)
# Normalize every pre-computed applied-in-X value to a column matrix.
553 if __appliedInX is not None:
554 self.__FO["AppliedInX"] = {}
555 for key in list(__appliedInX.keys()):
556 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
557 # Case of a true matrix input
558 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
559 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
560 # Case of a vector represented as a 2-dimensional array
561 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
563 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
565 self.__FO["AppliedInX"] = None
# __repr__/__str__ delegate to the internal operator dictionary (def lines
# elided in this capture).
571 "x.__repr__() <==> repr(x)"
572 return repr(self.__FO)
575 "x.__str__() <==> str(x)"
576 return str(self.__FO)
578 # ==============================================================================
# NOTE(review): elided numbered listing — code reproduced verbatim, comments
# added only. Algorithm is the base class for elementary assimilation
# algorithms: it pre-declares every Persistence-backed storage variable and
# builds case-insensitive lookup tables for parameter and variable names.
579 class Algorithm(object):
581 Classe générale d'interface de type algorithme
583 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
584 d'assimilation, en fournissant un container (dictionnaire) de variables
585 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
587 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
589 def __init__(self, name):
591 L'initialisation présente permet de fabriquer des variables de stockage
592 disponibles de manière générique dans les algorithmes élémentaires. Ces
593 variables de stockage sont ensuite conservées dans un dictionnaire
594 interne à l'objet, mais auquel on accède par la méthode "get".
596 Les variables prévues sont :
597 - APosterioriCorrelations : matrice de corrélations de la matrice A
598 - APosterioriCovariance : matrice de covariances a posteriori : A
599 - APosterioriStandardDeviations : vecteur des écart-types de la matrice A
600 - APosterioriVariances : vecteur des variances de la matrice A
601 - Analysis : vecteur d'analyse : Xa
602 - BMA : Background moins Analysis : Xa - Xb
603 - CostFunctionJ : fonction-coût globale, somme des deux parties suivantes Jb et Jo
604 - CostFunctionJAtCurrentOptimum : fonction-coût globale à l'état optimal courant lors d'itérations
605 - CostFunctionJb : partie ébauche ou background de la fonction-coût : Jb
606 - CostFunctionJbAtCurrentOptimum : partie ébauche à l'état optimal courant lors d'itérations
607 - CostFunctionJo : partie observations de la fonction-coût : Jo
608 - CostFunctionJoAtCurrentOptimum : partie observations à l'état optimal courant lors d'itérations
609 - CurrentIterationNumber : numéro courant d'itération dans les algorithmes itératifs, à partir de 0
610 - CurrentOptimum : état optimal courant lors d'itérations
611 - CurrentState : état courant lors d'itérations
612 - GradientOfCostFunctionJ : gradient de la fonction-coût globale
613 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-coût
614 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-coût
615 - IndexOfOptimum : index de l'état optimal courant lors d'itérations
616 - Innovation : l'innovation : d = Y - H(X)
617 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
618 - JacobianMatrixAtBackground : matrice jacobienne à l'état d'ébauche
619 - JacobianMatrixAtCurrentState : matrice jacobienne à l'état courant
620 - JacobianMatrixAtOptimum : matrice jacobienne à l'optimum
621 - KalmanGainAtOptimum : gain de Kalman à l'optimum
622 - MahalanobisConsistency : indicateur de consistance des covariances
623 - OMA : Observation moins Analyse : Y - Xa
624 - OMB : Observation moins Background : Y - Xb
625 - ForecastCovariance : covariance de l'état prédit courant lors d'itérations
626 - ForecastState : état prédit courant lors d'itérations
627 - Residu : dans le cas des algorithmes de vérification
628 - SampledStateForQuantiles : échantillons d'états pour l'estimation des quantiles
629 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
630 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
631 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
632 - SimulatedObservationAtCurrentOptimum : l'état observé H(X) à l'état optimal courant
633 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
634 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
635 - SimulationQuantiles : états observés H(X) pour les quantiles demandés
636 On peut rajouter des variables à stocker dans l'initialisation de
637 l'algorithme élémentaire qui va hériter de cette classe
639 logging.debug("%s Initialisation", str(name))
640 self._m = PlatformInfo.SystemUsage()
642 self._name = str( name )
643 self._parameters = {"StoreSupplementaryCalculations":[]}
644 self.__internal_state = {}
645 self.__required_parameters = {}
646 self.__required_inputs = {
647 "RequiredInputValues":{"mandatory":(), "optional":()},
648 "ClassificationTags":[],
# Mapping of internal-only variable names (duplicated in
# AlgorithmAndParameters, as the original comment notes).
650 self.__variable_names_not_public = {"nextStep":False} # Duplication dans AlgorithmAndParameters
651 self.__canonical_parameter_name = {} # Mapping "lower"->"correct"
652 self.__canonical_stored_name = {} # Mapping "lower"->"correct"
# Pre-declare every storable variable with its Persistence container type
# (OneScalar / OneVector / OneMatrix / OneIndex).
654 self.StoredVariables = {}
655 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
656 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
657 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
658 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
659 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
660 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
661 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
662 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
663 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
664 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
665 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
666 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
667 self.StoredVariables["CurrentEnsembleState"] = Persistence.OneMatrix(name = "CurrentEnsembleState")
668 self.StoredVariables["CurrentIterationNumber"] = Persistence.OneIndex(name = "CurrentIterationNumber")
669 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
670 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
671 self.StoredVariables["ForecastCovariance"] = Persistence.OneMatrix(name = "ForecastCovariance")
672 self.StoredVariables["ForecastState"] = Persistence.OneVector(name = "ForecastState")
673 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
674 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
675 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
676 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
677 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
678 self.StoredVariables["InnovationAtCurrentAnalysis"] = Persistence.OneVector(name = "InnovationAtCurrentAnalysis")
679 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
680 self.StoredVariables["JacobianMatrixAtBackground"] = Persistence.OneMatrix(name = "JacobianMatrixAtBackground")
681 self.StoredVariables["JacobianMatrixAtCurrentState"] = Persistence.OneMatrix(name = "JacobianMatrixAtCurrentState")
682 self.StoredVariables["JacobianMatrixAtOptimum"] = Persistence.OneMatrix(name = "JacobianMatrixAtOptimum")
683 self.StoredVariables["KalmanGainAtOptimum"] = Persistence.OneMatrix(name = "KalmanGainAtOptimum")
684 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
685 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
686 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
687 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
688 self.StoredVariables["SampledStateForQuantiles"] = Persistence.OneMatrix(name = "SampledStateForQuantiles")
689 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
690 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
691 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
692 self.StoredVariables["SimulatedObservationAtCurrentAnalysis"]= Persistence.OneVector(name = "SimulatedObservationAtCurrentAnalysis")
693 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
694 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
695 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
696 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
# Build the lowercase -> canonical name tables used for case-insensitive
# access to stored variables and parameters.
698 for k in self.StoredVariables:
699 self.__canonical_stored_name[k.lower()] = k
701 for k, v in self.__variable_names_not_public.items():
702 self.__canonical_parameter_name[k.lower()] = k
703 self.__canonical_parameter_name["algorithm"] = "Algorithm"
704 self.__canonical_parameter_name["storesupplementarycalculations"] = "StoreSupplementaryCalculations"
# NOTE(review): this method is truncated by the capture (it continues past the
# last visible line) and internal lines are elided; code reproduced verbatim,
# comments added only. _pre_run performs the common pre-processing of an
# algorithm launch: parameter setup, then presence/size diagnostics of the
# input vectors (Xb, Y, U), covariances (B, R, Q) and operators, raising
# only for inputs declared mandatory in __required_inputs.
706 def _pre_run(self, Parameters, Xb=None, Y=None, U=None, HO=None, EM=None, CM=None, R=None, B=None, Q=None ):
708 logging.debug("%s Lancement", self._name)
709 logging.debug("%s Taille mémoire utilisée de %.0f Mio"%(self._name, self._m.getUsedMemory("Mio")))
710 self._getTimeState(reset=True)
712 # Mise a jour des paramètres internes avec le contenu de Parameters, en
713 # reprenant les valeurs par défauts pour toutes celles non définies
714 self.__setParameters(Parameters, reset=True)
715 for k, v in self.__variable_names_not_public.items():
716 if k not in self._parameters: self.__setParameters( {k:v} )
718 # Corrections et compléments des vecteurs
# Local helper: diagnose one input vector (raise if mandatory and absent,
# else log its status and size). `else:` branch lines are elided.
719 def __test_vvalue(argument, variable, argname, symbol=None):
720 if symbol is None: symbol = variable
722 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
723 raise ValueError("%s %s vector %s is not set and has to be properly defined!"%(self._name,argname,symbol))
724 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
725 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,symbol))
727 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,symbol))
729 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
730 logging.debug("%s %s vector %s is required and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
731 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
732 logging.debug("%s %s vector %s is optional and set, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
734 logging.debug("%s %s vector %s is set although neither required nor optional, and its size is %i."%(self._name,argname,symbol,numpy.array(argument).size))
736 __test_vvalue( Xb, "Xb", "Background or initial state" )
737 __test_vvalue( Y, "Y", "Observation" )
738 __test_vvalue( U, "U", "Control" )
740 # Corrections et compléments des covariances
# Same diagnostic pattern for the error covariance matrices.
741 def __test_cvalue(argument, variable, argname, symbol=None):
742 if symbol is None: symbol = variable
744 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
745 raise ValueError("%s %s error covariance matrix %s is not set and has to be properly defined!"%(self._name,argname,symbol))
746 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
747 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,symbol))
749 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,symbol))
751 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
752 logging.debug("%s %s error covariance matrix %s is required and set."%(self._name,argname,symbol))
753 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
754 logging.debug("%s %s error covariance matrix %s is optional and set."%(self._name,argname,symbol))
756 logging.debug("%s %s error covariance matrix %s is set although neither required nor optional."%(self._name,argname,symbol))
758 __test_cvalue( B, "B", "Background" )
759 __test_cvalue( R, "R", "Observation" )
760 __test_cvalue( Q, "Q", "Evolution" )
762 # Corrections et compléments des opérateurs
# Same diagnostic pattern for the operators (continues past this capture).
763 def __test_ovalue(argument, variable, argname, symbol=None):
764 if symbol is None: symbol = variable
765 if argument is None or (isinstance(argument,dict) and len(argument)==0):
766 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
767 raise ValueError("%s %s operator %s is not set and has to be properly defined!"%(self._name,argname,symbol))
768 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
769 logging.debug("%s %s operator %s is not set, but is optional."%(self._name,argname,symbol))
771 logging.debug("%s %s operator %s is not set, but is not required."%(self._name,argname,symbol))
773 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
774 logging.debug("%s %s operator %s is required and set."%(self._name,argname,symbol))
775 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
776 logging.debug("%s %s operator %s is optional and set."%(self._name,argname,symbol))
778 logging.debug("%s %s operator %s is set although neither required nor optional."%(self._name,argname,symbol))
780 __test_ovalue( HO, "HO", "Observation", "H" )
781 __test_ovalue( EM, "EM", "Evolution", "M" )
782 __test_ovalue( CM, "CM", "Control Model", "C" )
784 # Corrections et compléments des bornes
785 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
786 logging.debug("%s Bounds taken into account"%(self._name,))
788 self._parameters["Bounds"] = None
789 if ("StateBoundsForQuantiles" in self._parameters) and isinstance(self._parameters["StateBoundsForQuantiles"], (list, tuple)) and (len(self._parameters["StateBoundsForQuantiles"]) > 0):
790 logging.debug("%s Bounds for quantiles states taken into account"%(self._name,))
791 # Attention : contrairement à Bounds, pas de défaut à None, sinon on ne peut pas être sans bornes
793 # Corrections et compléments de l'initialisation en X
794 if "InitializationPoint" in self._parameters:
796 if self._parameters["InitializationPoint"] is not None and hasattr(self._parameters["InitializationPoint"],'size'):
797 if self._parameters["InitializationPoint"].size != numpy.ravel(Xb).size:
798 raise ValueError("Incompatible size %i of forced initial point that have to replace the background of size %i" \
799 %(self._parameters["InitializationPoint"].size,numpy.ravel(Xb).size))
800 # Obtenu par typecast : numpy.ravel(self._parameters["InitializationPoint"])
802 self._parameters["InitializationPoint"] = numpy.ravel(Xb)
804 if self._parameters["InitializationPoint"] is None:
805 raise ValueError("Forced initial point can not be set without any given Background or required value")
807 # Correction pour pallier a un bug de TNC sur le retour du Minimum
808 if "Minimizer" in self._parameters and self._parameters["Minimizer"] == "TNC":
809 self.setParameterValue("StoreInternalVariables",True)
811 # Verbosité et logging
812 if logging.getLogger().level < logging.WARNING:
813 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
814 if PlatformInfo.has_scipy:
815 import scipy.optimize
816 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
818 self._parameters["optmessages"] = 15
820 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
821 if PlatformInfo.has_scipy:
822 import scipy.optimize
823 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
825 self._parameters["optmessages"] = 15
829 def _post_run(self,_oH=None):
831 if ("StoreSupplementaryCalculations" in self._parameters) and \
832 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
833 for _A in self.StoredVariables["APosterioriCovariance"]:
834 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
835 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
836 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
837 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
838 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
839 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
840 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
841 self.StoredVariables["APosterioriCorrelations"].store( _C )
842 if _oH is not None and "Direct" in _oH and "Tangent" in _oH and "Adjoint" in _oH:
843 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
844 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
845 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
846 logging.debug("%s Durées d'utilisation CPU de %.1fs et elapsed de %.1fs", self._name, self._getTimeState()[0], self._getTimeState()[1])
847 logging.debug("%s Terminé", self._name)
850 def _toStore(self, key):
851 "True if in StoreSupplementaryCalculations, else False"
852 return key in self._parameters["StoreSupplementaryCalculations"]
854 def get(self, key=None):
856 Renvoie l'une des variables stockées identifiée par la clé, ou le
857 dictionnaire de l'ensemble des variables disponibles en l'absence de
858 clé. Ce sont directement les variables sous forme objet qui sont
859 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
860 des classes de persistance.
863 return self.StoredVariables[self.__canonical_stored_name[key.lower()]]
865 return self.StoredVariables
867 def __contains__(self, key=None):
868 "D.__contains__(k) -> True if D has a key k, else False"
869 if key is None or key.lower() not in self.__canonical_stored_name:
872 return self.__canonical_stored_name[key.lower()] in self.StoredVariables
875 "D.keys() -> list of D's keys"
876 if hasattr(self, "StoredVariables"):
877 return self.StoredVariables.keys()
882 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
883 if hasattr(self, "StoredVariables") and k.lower() in self.__canonical_stored_name:
884 return self.StoredVariables.pop(self.__canonical_stored_name[k.lower()], d)
889 raise TypeError("pop expected at least 1 arguments, got 0")
890 "If key is not found, d is returned if given, otherwise KeyError is raised"
896 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
898 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
899 sa forme mathématique la plus naturelle possible.
901 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
903 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None, listadv = None):
905 Permet de définir dans l'algorithme des paramètres requis et leurs
906 caractéristiques par défaut.
909 raise ValueError("A name is mandatory to define a required parameter.")
911 self.__required_parameters[name] = {
913 "typecast" : typecast,
920 self.__canonical_parameter_name[name.lower()] = name
921 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
923 def getRequiredParameters(self, noDetails=True):
925 Renvoie la liste des noms de paramètres requis ou directement le
926 dictionnaire des paramètres requis.
929 return sorted(self.__required_parameters.keys())
931 return self.__required_parameters
933 def setParameterValue(self, name=None, value=None):
935 Renvoie la valeur d'un paramètre requis de manière contrôlée
937 __k = self.__canonical_parameter_name[name.lower()]
938 default = self.__required_parameters[__k]["default"]
939 typecast = self.__required_parameters[__k]["typecast"]
940 minval = self.__required_parameters[__k]["minval"]
941 maxval = self.__required_parameters[__k]["maxval"]
942 listval = self.__required_parameters[__k]["listval"]
943 listadv = self.__required_parameters[__k]["listadv"]
945 if value is None and default is None:
947 elif value is None and default is not None:
948 if typecast is None: __val = default
949 else: __val = typecast( default )
951 if typecast is None: __val = value
954 __val = typecast( value )
956 raise ValueError("The value '%s' for the parameter named '%s' can not be correctly evaluated with type '%s'."%(value, __k, typecast))
958 if minval is not None and (numpy.array(__val, float) < minval).any():
959 raise ValueError("The parameter named '%s' of value '%s' can not be less than %s."%(__k, __val, minval))
960 if maxval is not None and (numpy.array(__val, float) > maxval).any():
961 raise ValueError("The parameter named '%s' of value '%s' can not be greater than %s."%(__k, __val, maxval))
962 if listval is not None or listadv is not None:
963 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
965 if listval is not None and v in listval: continue
966 elif listadv is not None and v in listadv: continue
968 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%(v, __k, listval))
969 elif not (listval is not None and __val in listval) and not (listadv is not None and __val in listadv):
970 raise ValueError("The value '%s' is not allowed for the parameter named '%s', it has to be in the list %s."%( __val, __k,listval))
974 def requireInputArguments(self, mandatory=(), optional=()):
976 Permet d'imposer des arguments de calcul requis en entrée.
978 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
979 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
981 def getInputArguments(self):
983 Permet d'obtenir les listes des arguments de calcul requis en entrée.
985 return self.__required_inputs["RequiredInputValues"]["mandatory"], self.__required_inputs["RequiredInputValues"]["optional"]
987 def setAttributes(self, tags=()):
989 Permet d'adjoindre des attributs comme les tags de classification.
990 Renvoie la liste actuelle dans tous les cas.
992 self.__required_inputs["ClassificationTags"].extend( tags )
993 return self.__required_inputs["ClassificationTags"]
995 def __setParameters(self, fromDico={}, reset=False):
997 Permet de stocker les paramètres reçus dans le dictionnaire interne.
999 self._parameters.update( fromDico )
1000 __inverse_fromDico_keys = {}
1001 for k in fromDico.keys():
1002 if k.lower() in self.__canonical_parameter_name:
1003 __inverse_fromDico_keys[self.__canonical_parameter_name[k.lower()]] = k
1004 #~ __inverse_fromDico_keys = dict([(self.__canonical_parameter_name[k.lower()],k) for k in fromDico.keys()])
1005 __canonic_fromDico_keys = __inverse_fromDico_keys.keys()
1006 for k in self.__required_parameters.keys():
1007 if k in __canonic_fromDico_keys:
1008 self._parameters[k] = self.setParameterValue(k,fromDico[__inverse_fromDico_keys[k]])
1010 self._parameters[k] = self.setParameterValue(k)
1013 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
1015 def _setInternalState(self, key=None, value=None, fromDico={}, reset=False):
1017 Permet de stocker des variables nommées constituant l'état interne
1019 if reset: # Vide le dictionnaire préalablement
1020 self.__internal_state = {}
1021 if key is not None and value is not None:
1022 self.__internal_state[key] = value
1023 self.__internal_state.update( dict(fromDico) )
1025 def _getInternalState(self, key=None):
1027 Restitue un état interne sous la forme d'un dictionnaire de variables nommées
1029 if key is not None and key in self.__internal_state:
1030 return self.__internal_state[key]
1032 return self.__internal_state
1034 def _getTimeState(self, reset=False):
1036 Initialise ou restitue le temps de calcul (cpu/elapsed) à la seconde
1039 self.__initial_cpu_time = time.process_time()
1040 self.__initial_elapsed_time = time.perf_counter()
1043 self.__cpu_time = time.process_time() - self.__initial_cpu_time
1044 self.__elapsed_time = time.perf_counter() - self.__initial_elapsed_time
1045 return self.__cpu_time, self.__elapsed_time
1047 def _StopOnTimeLimit(self, X=None, withReason=False):
1048 "Stop criteria on time limit: True/False [+ Reason]"
1049 c, e = self._getTimeState()
1050 if "MaximumCpuTime" in self._parameters and c > self._parameters["MaximumCpuTime"]:
1051 __SC, __SR = True, "Reached maximum CPU time (%.1fs > %.1fs)"%(c, self._parameters["MaximumCpuTime"])
1052 elif "MaximumElapsedTime" in self._parameters and e > self._parameters["MaximumElapsedTime"]:
1053 __SC, __SR = True, "Reached maximum elapsed time (%.1fs > %.1fs)"%(e, self._parameters["MaximumElapsedTime"])
1055 __SC, __SR = False, ""
1061 # ==============================================================================
1062 class AlgorithmAndParameters(object):
1064 Classe générale d'interface d'action pour l'algorithme et ses paramètres
1067 name = "GenericAlgorithm",
1074 self.__name = str(name)
1078 self.__algorithm = {}
1079 self.__algorithmFile = None
1080 self.__algorithmName = None
1082 self.updateParameters( asDict, asScript )
1084 if asAlgorithm is None and asScript is not None:
1085 __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
1087 __Algo = asAlgorithm
1089 if __Algo is not None:
1090 self.__A = str(__Algo)
1091 self.__P.update( {"Algorithm":self.__A} )
1093 self.__setAlgorithm( self.__A )
1095 self.__variable_names_not_public = {"nextStep":False} # Duplication dans Algorithm
1097 def updateParameters(self,
1101 "Mise a jour des parametres"
1102 if asDict is None and asScript is not None:
1103 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1107 if __Dict is not None:
1108 self.__P.update( dict(__Dict) )
1110 def executePythonScheme(self, asDictAO = None):
1111 "Permet de lancer le calcul d'assimilation"
1112 Operator.CM.clearCache()
1114 if not isinstance(asDictAO, dict):
1115 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
1116 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
1117 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
1118 else: self.__Xb = None
1119 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
1120 else: self.__Y = asDictAO["Observation"]
1121 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
1122 else: self.__U = asDictAO["ControlInput"]
1123 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
1124 else: self.__HO = asDictAO["ObservationOperator"]
1125 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
1126 else: self.__EM = asDictAO["EvolutionModel"]
1127 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
1128 else: self.__CM = asDictAO["ControlModel"]
1129 self.__B = asDictAO["BackgroundError"]
1130 self.__R = asDictAO["ObservationError"]
1131 self.__Q = asDictAO["EvolutionError"]
1133 self.__shape_validate()
1135 self.__algorithm.run(
1145 Parameters = self.__P,
1149 def executeYACSScheme(self, FileName=None):
1150 "Permet de lancer le calcul d'assimilation"
1151 if FileName is None or not os.path.exists(FileName):
1152 raise ValueError("a YACS file name has to be given for YACS execution.\n")
1154 __file = os.path.abspath(FileName)
1155 logging.debug("The YACS file name is \"%s\"."%__file)
1156 if not PlatformInfo.has_salome or \
1157 not PlatformInfo.has_yacs or \
1158 not PlatformInfo.has_adao:
1159 raise ImportError("\n\n"+\
1160 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
1161 "Please load the right environnement before trying to use it.\n")
1164 import SALOMERuntime
1166 SALOMERuntime.RuntimeSALOME_setRuntime()
1168 r = pilot.getRuntime()
1169 xmlLoader = loader.YACSLoader()
1170 xmlLoader.registerProcCataLoader()
1172 catalogAd = r.loadCatalog("proc", __file)
1173 r.addCatalog(catalogAd)
1178 p = xmlLoader.load(__file)
1179 except IOError as ex:
1180 print("The YACS XML schema file can not be loaded: %s"%(ex,))
1182 logger = p.getLogger("parser")
1183 if not logger.isEmpty():
1184 print("The imported YACS XML schema has errors on parsing:")
1185 print(logger.getStr())
1188 print("The YACS XML schema is not valid and will not be executed:")
1189 print(p.getErrorReport())
1191 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1192 p.checkConsistency(info)
1193 if info.areWarningsOrErrors():
1194 print("The YACS XML schema is not coherent and will not be executed:")
1195 print(info.getGlobalRepr())
1197 e = pilot.ExecutorSwig()
1199 if p.getEffectiveState() != pilot.DONE:
1200 print(p.getErrorReport())
1204 def get(self, key = None):
1205 "Vérifie l'existence d'une clé de variable ou de paramètres"
1206 if key in self.__algorithm:
1207 return self.__algorithm.get( key )
1208 elif key in self.__P:
1209 return self.__P[key]
1211 allvariables = self.__P
1212 for k in self.__variable_names_not_public: allvariables.pop(k, None)
1215 def pop(self, k, d):
1216 "Necessaire pour le pickling"
1217 return self.__algorithm.pop(k, d)
1219 def getAlgorithmRequiredParameters(self, noDetails=True):
1220 "Renvoie la liste des paramètres requis selon l'algorithme"
1221 return self.__algorithm.getRequiredParameters(noDetails)
1223 def getAlgorithmInputArguments(self):
1224 "Renvoie la liste des entrées requises selon l'algorithme"
1225 return self.__algorithm.getInputArguments()
1227 def getAlgorithmAttributes(self):
1228 "Renvoie la liste des attributs selon l'algorithme"
1229 return self.__algorithm.setAttributes()
1231 def setObserver(self, __V, __O, __I, __S):
1232 if self.__algorithm is None \
1233 or isinstance(self.__algorithm, dict) \
1234 or not hasattr(self.__algorithm,"StoredVariables"):
1235 raise ValueError("No observer can be build before choosing an algorithm.")
1236 if __V not in self.__algorithm:
1237 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1239 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1242 HookParameters = __I,
1245 def removeObserver(self, __V, __O, __A = False):
1246 if self.__algorithm is None \
1247 or isinstance(self.__algorithm, dict) \
1248 or not hasattr(self.__algorithm,"StoredVariables"):
1249 raise ValueError("No observer can be removed before choosing an algorithm.")
1250 if __V not in self.__algorithm:
1251 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1253 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1258 def hasObserver(self, __V):
1259 if self.__algorithm is None \
1260 or isinstance(self.__algorithm, dict) \
1261 or not hasattr(self.__algorithm,"StoredVariables"):
1263 if __V not in self.__algorithm:
1265 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
1268 __allvariables = list(self.__algorithm.keys()) + list(self.__P.keys())
1269 for k in self.__variable_names_not_public:
1270 if k in __allvariables: __allvariables.remove(k)
1271 return __allvariables
1273 def __contains__(self, key=None):
1274 "D.__contains__(k) -> True if D has a key k, else False"
1275 return key in self.__algorithm or key in self.__P
1278 "x.__repr__() <==> repr(x)"
1279 return repr(self.__A)+", "+repr(self.__P)
1282 "x.__str__() <==> str(x)"
1283 return str(self.__A)+", "+str(self.__P)
1285 def __setAlgorithm(self, choice = None ):
1287 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1288 d'assimilation. L'argument est un champ caractère se rapportant au nom
1289 d'un algorithme réalisant l'opération sur les arguments fixes.
1292 raise ValueError("Error: algorithm choice has to be given")
1293 if self.__algorithmName is not None:
1294 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1295 daDirectory = "daAlgorithms"
1297 # Recherche explicitement le fichier complet
1298 # ------------------------------------------
1300 for directory in sys.path:
1301 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1302 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1303 if module_path is None:
1304 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1306 # Importe le fichier complet comme un module
1307 # ------------------------------------------
1309 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1310 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1311 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1312 raise ImportError("this module does not define a valid elementary algorithm.")
1313 self.__algorithmName = str(choice)
1314 sys.path = sys_path_tmp ; del sys_path_tmp
1315 except ImportError as e:
1316 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1318 # Instancie un objet du type élémentaire du fichier
1319 # -------------------------------------------------
1320 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1323 def __shape_validate(self):
1325 Validation de la correspondance correcte des tailles des variables et
1326 des matrices s'il y en a.
1328 if self.__Xb is None: __Xb_shape = (0,)
1329 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1330 elif hasattr(self.__Xb,"shape"):
1331 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1332 else: __Xb_shape = self.__Xb.shape()
1333 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1335 if self.__Y is None: __Y_shape = (0,)
1336 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1337 elif hasattr(self.__Y,"shape"):
1338 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1339 else: __Y_shape = self.__Y.shape()
1340 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1342 if self.__U is None: __U_shape = (0,)
1343 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1344 elif hasattr(self.__U,"shape"):
1345 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1346 else: __U_shape = self.__U.shape()
1347 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1349 if self.__B is None: __B_shape = (0,0)
1350 elif hasattr(self.__B,"shape"):
1351 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1352 else: __B_shape = self.__B.shape()
1353 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1355 if self.__R is None: __R_shape = (0,0)
1356 elif hasattr(self.__R,"shape"):
1357 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1358 else: __R_shape = self.__R.shape()
1359 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1361 if self.__Q is None: __Q_shape = (0,0)
1362 elif hasattr(self.__Q,"shape"):
1363 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1364 else: __Q_shape = self.__Q.shape()
1365 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1367 if len(self.__HO) == 0: __HO_shape = (0,0)
1368 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1369 elif hasattr(self.__HO["Direct"],"shape"):
1370 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1371 else: __HO_shape = self.__HO["Direct"].shape()
1372 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1374 if len(self.__EM) == 0: __EM_shape = (0,0)
1375 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1376 elif hasattr(self.__EM["Direct"],"shape"):
1377 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1378 else: __EM_shape = self.__EM["Direct"].shape()
1379 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1381 if len(self.__CM) == 0: __CM_shape = (0,0)
1382 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1383 elif hasattr(self.__CM["Direct"],"shape"):
1384 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1385 else: __CM_shape = self.__CM["Direct"].shape()
1386 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1388 # Vérification des conditions
1389 # ---------------------------
1390 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1391 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1392 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1393 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1395 if not( min(__B_shape) == max(__B_shape) ):
1396 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1397 if not( min(__R_shape) == max(__R_shape) ):
1398 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1399 if not( min(__Q_shape) == max(__Q_shape) ):
1400 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1401 if not( min(__EM_shape) == max(__EM_shape) ):
1402 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1404 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1405 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1406 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1407 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1408 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1409 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1410 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1411 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1413 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1414 if self.__algorithmName in ["EnsembleBlue",]:
1415 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1416 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1417 for member in asPersistentVector:
1418 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1419 __Xb_shape = min(__B_shape)
1421 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1423 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1424 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1426 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1427 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1429 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1430 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1432 if ("Bounds" in self.__P) \
1433 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1434 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1435 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1436 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1438 if ("StateBoundsForQuantiles" in self.__P) \
1439 and (isinstance(self.__P["StateBoundsForQuantiles"], list) or isinstance(self.__P["StateBoundsForQuantiles"], tuple)) \
1440 and (len(self.__P["StateBoundsForQuantiles"]) != max(__Xb_shape)):
1441 raise ValueError("The number \"%s\" of bound pairs for the quantile state (X) components is different of the size \"%s\" of the state itself." \
1442 %(len(self.__P["StateBoundsForQuantiles"]),max(__Xb_shape)))
1446 # ==============================================================================
class RegulationAndParameters(object):
    """
    General action interface class for regulation and its parameters
    """
    def __init__(self,
                 name               = "GenericRegulation",
                 asAlgorithm        = None,
                 asDict             = None,
                 asScript           = None,
                ):
        """
        Store the regulation algorithm name and its parameters, each read
        either from the explicit argument or from a user script.
        """
        self.__name = str(name)
        self.__P    = {}
        #
        if asAlgorithm is None and asScript is not None:
            __Algo = Interfaces.ImportFromScript(asScript).getvalue( "Algorithm" )
        else:
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        else:
            __Dict = asDict
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":str(__Algo)} )

    def get(self, key = None):
        "Vérifie l'existence d'une clé de variable ou de paramètres"
        if key in self.__P:
            return self.__P[key]
        else:
            return self.__P
1485 # ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class
    """
    def __init__(self,
                 name        = "GenericObserver",
                 onVariable  = None,
                 asTemplate  = None,
                 asString    = None,
                 asScript    = None,
                 asObsObject = None,
                 withInfo    = None,
                 scheduledBy = None,
                 withAlgo    = None,
                ):
        """
        Build the observer function (from an object, a template, a string or
        a script) and attach it to the given variable(s) of *withAlgo*.
        """
        self.__name = str(name)
        self.__V    = None
        self.__O    = None
        self.__I    = None
        #
        # Normalize the observed variable name(s) into a tuple, with one info
        # string per variable
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                self.__I = self.__V
            else:
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            else:
                self.__I = (str(withInfo),)
        else:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        if asObsObject is not None:
            self.__O = asObsObject
        else:
            __FunctionText = str(UserScript('Observer', asTemplate, asString, asScript))
            __Function = Observer2Func(__FunctionText)
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            ename = self.__V[k]
            einfo = self.__I[k]
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            else:
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1548 # ==============================================================================
1549 class UserScript(object):
# General user-script-text interface class: resolves the script text
# self.__F from, in priority order, a direct string, a named template
# (UserPostAnalysis/Observer), or a script file.
1551 General user-script-text interface class
1554 name = "GenericUserScript",
1561 self.__name = str(name)
1563 if asString is not None:
# Named templates are looked up in the Templates registry, keyed by role.
1565 elif self.__name == "UserPostAnalysis" and (asTemplate is not None) and (asTemplate in Templates.UserPostAnalysisTemplates):
1566 self.__F = Templates.UserPostAnalysisTemplates[asTemplate]
1567 elif self.__name == "Observer" and (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1568 self.__F = Templates.ObserverTemplates[asTemplate]
1569 elif asScript is not None:
1570 self.__F = Interfaces.ImportFromScript(asScript).getstring()
1575 "x.__repr__() <==> repr(x)"
1576 return repr(self.__F)
1579 "x.__str__() <==> str(x)"
# str() of a UserScript is the script text itself (used by DataObserver).
1580 return str(self.__F)
1582 # ==============================================================================
1583 class ExternalParameters(object):
# Dict-like container of named external parameters (self.__P).
# NOTE(review): the original docstring below is identical to UserScript's
# ("user-script-text interface class") and looks copy-pasted — confirm intent.
1585 General user-script-text interface class
1588 name = "GenericExternalParameters",
1594 self.__name = str(name)
1597 self.updateParameters( asDict, asScript )
1599 def updateParameters(self,
1603 "Update of the parameters"
# Parameters come either from asDict or from the script's
# "ExternalParameters" variable associated with this object's name.
1604 if asDict is None and asScript is not None:
1605 __Dict = Interfaces.ImportFromScript(asScript).getvalue( self.__name, "ExternalParameters" )
1609 if __Dict is not None:
1610 self.__P.update( dict(__Dict) )
1612 def get(self, key = None):
1614 return self.__P[key]
# Both key-listing accessors expose the parameter names.
1616 return list(self.__P.keys())
1619 return list(self.__P.keys())
1621 def pop(self, k, d):
# Same contract as dict.pop: return d when k is absent.
1622 return self.__P.pop(k, d)
1625 return self.__P.items()
1627 def __contains__(self, key=None):
1628 "D.__contains__(k) -> True if D has a key k, else False"
1629 return key in self.__P
1631 # ==============================================================================
1632 class State(object):
# General state-type interface class: wraps either a single vector
# (self.__is_vector) or a series of vectors (self.__is_series) with an
# optional scheduler self.__T.
1634 General state-type interface class
1637 name = "GenericVector",
1639 asPersistentVector = None,
1645 toBeChecked = False,
1648 Allows defining a vector:
1649 - asVector : data input, as a vector compatible with the
1650 numpy.matrix constructor, or "True" if input comes from a script.
1651 - asPersistentVector : data input, as a series of vectors
1652 compatible with the numpy.matrix constructor, or as an object of
1653 Persistence type, or "True" if input comes from a script.
1654 - asScript : if a valid script is given containing a variable
1655 named "name", the variable is of type "asVector" (by default) or
1656 "asPersistentVector" depending on which of these variables is set to
1658 - asDataFile : if one or more valid files are given,
1659 containing values in columns, themselves named "colNames"
1660 (if no column name is given, a column named "name" is looked
1661 for), the columns are retrieved and stored row after
1662 row (colMajor=False, by default) or column after column
1663 (colMajor=True). The resulting variable is of type "asVector" (by
1664 default) or "asPersistentVector" depending on which of these variables is
1667 self.__name = str(name)
1668 self.__check = bool(toBeChecked)
1672 self.__is_vector = False
1673 self.__is_series = False
# Input resolution: script, then data file(s), then direct values.
1675 if asScript is not None:
1676 __Vector, __Series = None, None
1677 if asPersistentVector:
1678 __Series = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1680 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1681 elif asDataFile is not None:
1682 __Vector, __Series = None, None
1683 if asPersistentVector:
1684 if colNames is not None:
1685 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1687 __Series = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files are already stored in the transposed orientation, hence the
# asymmetric transpose conditions below.
1688 if bool(colMajor) and not Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1689 __Series = numpy.transpose(__Series)
1690 elif not bool(colMajor) and Interfaces.ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1691 __Series = numpy.transpose(__Series)
1693 if colNames is not None:
1694 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( colNames )[1]
1696 __Vector = Interfaces.ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# Flatten column-major ("F") or row-major ("C") depending on colMajor.
1698 __Vector = numpy.ravel(__Vector, order = "F")
1700 __Vector = numpy.ravel(__Vector, order = "C")
1702 __Vector, __Series = asVector, asPersistentVector
1704 if __Vector is not None:
1705 self.__is_vector = True
# Stored as a column matrix. NOTE(review): numpy.float is a removed
# alias in NumPy >= 1.24 — should be float; confirm targeted NumPy version.
1706 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1707 self.shape = self.__V.shape
1708 self.size = self.__V.size
1709 elif __Series is not None:
1710 self.__is_series = True
1711 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1712 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string input is unsafe if the string can
# come from an untrusted source — consider ast.literal_eval.
1713 if isinstance(__Series, str): __Series = eval(__Series)
1714 for member in __Series:
1715 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# shape may be an attribute (ndarray-like) or a method (Persistence-like).
1718 if isinstance(self.__V.shape, (tuple, list)):
1719 self.shape = self.__V.shape
1721 self.shape = self.__V.shape()
1722 if len(self.shape) == 1:
1723 self.shape = (self.shape[0],1)
1724 self.size = self.shape[0] * self.shape[1]
1726 raise ValueError("The %s object is improperly defined or undefined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1728 if scheduledBy is not None:
1729 self.__T = scheduledBy
1731 def getO(self, withScheduler=False):
# Returns the stored value, with the scheduler when requested.
1733 return self.__V, self.__T
1734 elif self.__T is None:
1740 "Internal type check"
1741 return self.__is_vector
1744 "Internal type check"
1745 return self.__is_series
1748 "x.__repr__() <==> repr(x)"
1749 return repr(self.__V)
1752 "x.__str__() <==> str(x)"
1753 return str(self.__V)
1755 # ==============================================================================
1756 class Covariance(object):
# General covariance-type interface class. The matrix is stored in one
# of four internal representations (self.__C):
#   scalar  : a single variance multiplying an implicit identity
#   vector  : the diagonal of an implicit diagonal matrix
#   matrix  : a full numpy.matrix
#   object  : a user object exposing the matrix protocol listed below
# All algebraic methods dispatch on these flags.
1758 General covariance-type interface class
1761 name = "GenericCovariance",
1762 asCovariance = None,
1763 asEyeByScalar = None,
1764 asEyeByVector = None,
1767 toBeChecked = False,
1770 Allows defining a covariance:
1771 - asCovariance : data input, as a matrix compatible with
1772 the numpy.matrix constructor
1773 - asEyeByScalar : data input as a single variance scalar,
1774 multiplying an identity correlation matrix, so that no matrix
1775 has to be given explicitly
1776 - asEyeByVector : data input as a single variance vector,
1777 to be put on the diagonal of a correlation matrix, so that no matrix
1778 has to be given explicitly
1779 - asCovObject : data input as a python object, which has the
1780 mandatory methods "getT", "getI", "diag", "trace", "__add__",
1781 "__sub__", "__neg__", "__mul__", "__rmul__" and optional ones "shape",
1782 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1783 - toBeChecked : boolean indicating whether the SPD character of the
1784 full matrix has to be checked
1786 self.__name = str(name)
1787 self.__check = bool(toBeChecked)
1790 self.__is_scalar = False
1791 self.__is_vector = False
1792 self.__is_matrix = False
1793 self.__is_object = False
# When a script is given, the value is read from it under this object's
# name; otherwise the direct keyword arguments are used.
1795 if asScript is not None:
1796 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1798 __Scalar = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1800 __Vector = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1802 __Object = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1804 __Matrix = Interfaces.ImportFromScript(asScript).getvalue( self.__name )
1806 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar representation: accept "1.0"-style strings, keep the absolute value.
1808 if __Scalar is not None:
1809 if isinstance(__Scalar, str):
1810 __Scalar = __Scalar.replace(";"," ").replace(","," ").split()
1811 if len(__Scalar) > 0: __Scalar = __Scalar[0]
1812 if numpy.array(__Scalar).size != 1:
1813 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.array(__Scalar).size)
1814 self.__is_scalar = True
1815 self.__C = numpy.abs( float(__Scalar) )
# Vector representation: diagonal of the (implicit) full matrix.
1818 elif __Vector is not None:
1819 if isinstance(__Vector, str):
1820 __Vector = __Vector.replace(";"," ").replace(","," ").split()
1821 self.__is_vector = True
1822 self.__C = numpy.abs( numpy.array( numpy.ravel( __Vector ), dtype=float ) )
1823 self.shape = (self.__C.size,self.__C.size)
1824 self.size = self.__C.size**2
# Full-matrix representation.
1825 elif __Matrix is not None:
1826 self.__is_matrix = True
1827 self.__C = numpy.matrix( __Matrix, float )
1828 self.shape = self.__C.shape
1829 self.size = self.__C.size
# Object representation: duck-typed, mandatory protocol checked here.
1830 elif __Object is not None:
1831 self.__is_object = True
1833 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__matmul__","__mul__","__rmatmul__","__rmul__"):
1834 if not hasattr(self.__C,at):
1835 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1836 if hasattr(self.__C,"shape"):
1837 self.shape = self.__C.shape
1840 if hasattr(self.__C,"size"):
1841 self.size = self.__C.size
1846 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1850 def __validate(self):
# Sanity checks: value set, square shape, positivity; the SPD check via
# Cholesky is only run when requested or at debug logging levels.
1852 if self.__C is None:
1853 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1854 if self.ismatrix() and min(self.shape) != max(self.shape):
1855 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1856 if self.isobject() and min(self.shape) != max(self.shape):
1857 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1858 if self.isscalar() and self.__C <= 0:
1859 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1860 if self.isvector() and (self.__C <= 0).any():
1861 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1862 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
# A successful Cholesky factorization proves symmetric positive-definiteness.
1864 L = numpy.linalg.cholesky( self.__C )
1866 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1867 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1869 L = self.__C.cholesky()
1871 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1874 "Internal type check"
1875 return self.__is_scalar
1878 "Internal type check"
1879 return self.__is_vector
1882 "Internal type check"
1883 return self.__is_matrix
1886 "Internal type check"
1887 return self.__is_object
# Inverse: returned as a new Covariance in the matching representation.
1892 return Covariance(self.__name+"I", asCovariance = numpy.linalg.inv(self.__C) )
1893 elif self.isvector():
1894 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1895 elif self.isscalar():
1896 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1897 elif self.isobject() and hasattr(self.__C,"getI"):
1898 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1900 return None # Essential
# Transpose: diagonal and scalar forms are their own transpose.
1905 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1906 elif self.isvector():
1907 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1908 elif self.isscalar():
1909 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1910 elif self.isobject() and hasattr(self.__C,"getT"):
1911 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1913 raise AttributeError("the %s covariance matrix has no getT attribute."%(self.__name,))
1916 "Cholesky decomposition"
1918 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1919 elif self.isvector():
1920 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1921 elif self.isscalar():
1922 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1923 elif self.isobject() and hasattr(self.__C,"cholesky"):
1924 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1926 raise AttributeError("the %s covariance matrix has no cholesky attribute."%(self.__name,))
1928 def choleskyI(self):
1929 "Inverse of the Cholesky decomposition"
1931 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.linalg.cholesky(self.__C)) )
1932 elif self.isvector():
1933 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1934 elif self.isscalar():
1935 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1936 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1937 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1939 raise AttributeError("the %s covariance matrix has no choleskyI attribute."%(self.__name,))
1942 "Matrix square root"
# scipy.linalg.sqrtm may return a complex result; only the real part is kept.
1945 return Covariance(self.__name+"C", asCovariance = numpy.real(scipy.linalg.sqrtm(self.__C)) )
1946 elif self.isvector():
1947 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1948 elif self.isscalar():
1949 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1950 elif self.isobject() and hasattr(self.__C,"sqrtm"):
1951 return Covariance(self.__name+"C", asCovObject = self.__C.sqrtm() )
1953 raise AttributeError("the %s covariance matrix has no sqrtm attribute."%(self.__name,))
1956 "Inverse of the matrix square root"
1959 return Covariance(self.__name+"H", asCovariance = numpy.linalg.inv(numpy.real(scipy.linalg.sqrtm(self.__C))) )
1960 elif self.isvector():
1961 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1962 elif self.isscalar():
1963 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1964 elif self.isobject() and hasattr(self.__C,"sqrtmI"):
1965 return Covariance(self.__name+"H", asCovObject = self.__C.sqrtmI() )
1967 raise AttributeError("the %s covariance matrix has no sqrtmI attribute."%(self.__name,))
1969 def diag(self, msize=None):
1970 "Diagonal of the matrix"
1972 return numpy.diag(self.__C)
1973 elif self.isvector():
1975 elif self.isscalar():
# The scalar form has no intrinsic size: the caller must supply msize.
1977 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1979 return self.__C * numpy.ones(int(msize))
1980 elif self.isobject() and hasattr(self.__C,"diag"):
1981 return self.__C.diag()
1983 raise AttributeError("the %s covariance matrix has no diag attribute."%(self.__name,))
1985 def trace(self, msize=None):
1986 "Trace of the matrix"
1988 return numpy.trace(self.__C)
1989 elif self.isvector():
1990 return float(numpy.sum(self.__C))
1991 elif self.isscalar():
1993 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1995 return self.__C * int(msize)
1996 elif self.isobject():
1997 return self.__C.trace()
1999 raise AttributeError("the %s covariance matrix has no trace attribute."%(self.__name,))
2001 def asfullmatrix(self, msize=None):
# Materializes the full dense matrix whatever the internal representation.
2004 return numpy.asarray(self.__C)
2005 elif self.isvector():
2006 return numpy.asarray( numpy.diag(self.__C), float )
2007 elif self.isscalar():
2009 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
2011 return numpy.asarray( self.__C * numpy.eye(int(msize)), float )
2012 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
2013 return self.__C.asfullmatrix()
2015 raise AttributeError("the %s covariance matrix has no asfullmatrix attribute."%(self.__name,))
2017 def assparsematrix(self):
2025 "x.__repr__() <==> repr(x)"
2026 return repr(self.__C)
2029 "x.__str__() <==> str(x)"
2030 return str(self.__C)
2032 def __add__(self, other):
2033 "x.__add__(y) <==> x+y"
2034 if self.ismatrix() or self.isobject():
2035 return self.__C + numpy.asmatrix(other)
2036 elif self.isvector() or self.isscalar():
# Diagonal/scalar case: add self.__C onto the diagonal of "other"
# in place via a strided flat view (step = row length + 1).
2037 _A = numpy.asarray(other)
2038 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
2039 return numpy.asmatrix(_A)
2041 def __radd__(self, other):
2042 "x.__radd__(y) <==> y+x"
2043 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
2045 def __sub__(self, other):
2046 "x.__sub__(y) <==> x-y"
2047 if self.ismatrix() or self.isobject():
2048 return self.__C - numpy.asmatrix(other)
2049 elif self.isvector() or self.isscalar():
2050 _A = numpy.asarray(other)
2051 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
2052 return numpy.asmatrix(_A)
2054 def __rsub__(self, other):
2055 "x.__rsub__(y) <==> y-x"
2056 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
2059 "x.__neg__() <==> -x"
2062 def __matmul__(self, other):
2063 "x.__mul__(y) <==> x@y"
# Dispatch on representation and operand kind; vector operands produce
# 1-D arrays, matrix operands produce 2-D arrays.
2064 if self.ismatrix() and isinstance(other, (int, float)):
2065 return numpy.asarray(self.__C) * other
2066 elif self.ismatrix() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2067 if numpy.ravel(other).size == self.shape[1]: # Vector
2068 return numpy.ravel(self.__C @ numpy.ravel(other))
2069 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
2070 return numpy.asarray(self.__C) @ numpy.asarray(other)
2072 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asarray(other).shape,self.__name))
2073 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2074 if numpy.ravel(other).size == self.shape[1]: # Vector
2075 return numpy.ravel(self.__C) * numpy.ravel(other)
2076 elif numpy.asarray(other).shape[0] == self.shape[1]: # Matrix
# Diagonal times matrix == scale each row by the diagonal entry.
2077 return numpy.ravel(self.__C).reshape((-1,1)) * numpy.asarray(other)
2079 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2080 elif self.isscalar() and isinstance(other,numpy.matrix):
2081 return numpy.asarray(self.__C * other)
2082 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2083 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2084 return self.__C * numpy.ravel(other)
2086 return self.__C * numpy.asarray(other)
2087 elif self.isobject():
2088 return self.__C.__matmul__(other)
2090 raise NotImplementedError("%s covariance matrix __matmul__ method not available for %s type!"%(self.__name,type(other)))
2092 def __mul__(self, other):
2093 "x.__mul__(y) <==> x*y"
# Same dispatch as __matmul__, but in numpy.matrix semantics: results are
# matrices and vector operands are treated as column matrices.
2094 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2095 return self.__C * other
2096 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2097 if numpy.ravel(other).size == self.shape[1]: # Vector
2098 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2099 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2100 return self.__C * numpy.asmatrix(other)
2102 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
2103 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
2104 if numpy.ravel(other).size == self.shape[1]: # Vector
2105 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
2106 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2107 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
2109 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
2110 elif self.isscalar() and isinstance(other,numpy.matrix):
2111 return self.__C * other
2112 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
2113 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
2114 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
2116 return self.__C * numpy.asmatrix(other)
2117 elif self.isobject():
2118 return self.__C.__mul__(other)
2120 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
2122 def __rmatmul__(self, other):
2123 "x.__rmul__(y) <==> y@x"
2124 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2125 return other * self.__C
2126 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2127 if numpy.ravel(other).size == self.shape[1]: # Vector
2128 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2129 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2130 return numpy.asmatrix(other) * self.__C
2132 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2133 elif self.isvector() and isinstance(other,numpy.matrix):
2134 if numpy.ravel(other).size == self.shape[0]: # Vector
2135 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2136 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2137 return numpy.asmatrix(numpy.array(other) * self.__C)
2139 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2140 elif self.isscalar() and isinstance(other,numpy.matrix):
2141 return other * self.__C
2142 elif self.isobject():
2143 return self.__C.__rmatmul__(other)
2145 raise NotImplementedError("%s covariance matrix __rmatmul__ method not available for %s type!"%(self.__name,type(other)))
2147 def __rmul__(self, other):
2148 "x.__rmul__(y) <==> y*x"
# Mirror of __mul__ for right multiplication (y * x).
2149 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
2150 return other * self.__C
2151 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
2152 if numpy.ravel(other).size == self.shape[1]: # Vector
2153 return numpy.asmatrix(numpy.ravel(other)) * self.__C
2154 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
2155 return numpy.asmatrix(other) * self.__C
2157 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
2158 elif self.isvector() and isinstance(other,numpy.matrix):
2159 if numpy.ravel(other).size == self.shape[0]: # Vector
2160 return numpy.asmatrix(numpy.ravel(other) * self.__C)
2161 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
2162 return numpy.asmatrix(numpy.array(other) * self.__C)
2164 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
2165 elif self.isscalar() and isinstance(other,numpy.matrix):
2166 return other * self.__C
2167 elif self.isobject():
2168 return self.__C.__rmul__(other)
2170 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
2173 "x.__len__() <==> len(x)"
# len() of a covariance is its number of rows.
2174 return self.shape[0]
2176 # ==============================================================================
2177 class Observer2Func(object):
# Builds an observer function object from its source text (self.__corps);
# getfunc() hands the callable back to the caller (used by DataObserver).
2179 Creation of an observer function from its text
2181 def __init__(self, corps=""):
2182 self.__corps = corps
2183 def func(self,var,info):
2184 "Observation function"
2187 "Returns the function pointer held by the object"
2190 # ==============================================================================
2191 class CaseLogger(object):
# Keeps the series of commands used to create a case (self.__logSerie)
# and converts it to/from external formats through pluggable
# viewer (dump) and loader (load) classes from Interfaces.
2193 Storage of the commands used to create a case
2195 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
2196 self.__name = str(__name)
2197 self.__objname = str(__objname)
2198 self.__logSerie = []
2199 self.__switchoff = False
# Built-in output formats (viewers).
2201 "TUI" :Interfaces._TUIViewer,
2202 "SCD" :Interfaces._SCDViewer,
2203 "YACS":Interfaces._YACSViewer,
2204 "SimpleReportInRst":Interfaces._SimpleReportInRstViewer,
2205 "SimpleReportInHtml":Interfaces._SimpleReportInHtmlViewer,
2206 "SimpleReportInPlainTxt":Interfaces._SimpleReportInPlainTxtViewer,
# Built-in input formats (loaders).
2209 "TUI" :Interfaces._TUIViewer,
2210 "COM" :Interfaces._COMViewer,
# Caller-supplied formats extend (or override) the built-in registries.
2212 if __addViewers is not None:
2213 self.__viewers.update(dict(__addViewers))
2214 if __addLoaders is not None:
2215 self.__loaders.update(dict(__addLoaders))
2217 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
2218 "Recording of an individual command"
# Recording is skipped entirely while the logger is switched off.
2219 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
2220 if "self" in __keys: __keys.remove("self")
2221 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
2223 self.__switchoff = True
2225 self.__switchoff = False
2227 def dump(self, __filename=None, __format="TUI", __upa=""):
2228 "Normalized dump of the commands"
2229 if __format in self.__viewers:
2230 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
2232 raise ValueError("Dumping as \"%s\" is not available"%__format)
2233 return __formater.dump(__filename, __upa)
2235 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
2236 "Normalized loading of the commands"
2237 if __format in self.__loaders:
2238 __formater = self.__loaders[__format]()
2240 raise ValueError("Loading as \"%s\" is not available"%__format)
2241 return __formater.load(__filename, __content, __object)
2243 # ==============================================================================
2246 _extraArguments = None,
2247 _sFunction = lambda x: x,
2252 For an ordered list of input vectors, returns as output the corresponding
2253 list of values of the function given as argument
# Initial checks and definitions
2255 # Initial checks and definitions
2256 # logging.debug("MULTF Internal multifonction calculations begin with function %s"%(_sFunction.__name__,))
2257 if not PlatformInfo.isIterable( __xserie ):
2258 raise TypeError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
# Worker count: non-positive or None falls back to the default pool size.
2260 if (_mpWorkers is None) or (_mpWorkers is not None and _mpWorkers < 1):
2263 __mpWorkers = int(_mpWorkers)
2265 import multiprocessing
# Parallel branch: evaluate _sFunction over the jobs with a process pool.
# NOTE(review): this requires _sFunction to be picklable — confirm callers.
2276 # logging.debug("MULTF Internal multiprocessing calculations begin : evaluation of %i point(s)"%(len(_jobs),))
2277 import multiprocessing
2278 with multiprocessing.Pool(__mpWorkers) as pool:
2279 __multiHX = pool.map( _sFunction, _jobs )
2282 # logging.debug("MULTF Internal multiprocessing calculation end")
# Sequential branch: apply _sFunction element-wise, forwarding extra
# arguments positionally (list/tuple/map) or by keyword (dict).
2284 # logging.debug("MULTF Internal monoprocessing calculation begin")
2286 if _extraArguments is None:
2287 for __xvalue in __xserie:
2288 __multiHX.append( _sFunction( __xvalue ) )
2289 elif _extraArguments is not None and isinstance(_extraArguments, (list, tuple, map)):
2290 for __xvalue in __xserie:
2291 __multiHX.append( _sFunction( __xvalue, *_extraArguments ) )
2292 elif _extraArguments is not None and isinstance(_extraArguments, dict):
2293 for __xvalue in __xserie:
2294 __multiHX.append( _sFunction( __xvalue, **_extraArguments ) )
2296 raise TypeError("MultiFonction extra arguments unkown input type: %s"%(type(_extraArguments),))
2297 # logging.debug("MULTF Internal monoprocessing calculation end")
2299 # logging.debug("MULTF Internal multifonction calculations end")
2302 # ==============================================================================
2303 def CostFunction3D(_x,
2304 _Hm = None, # To simulate Hm(x) : HO["Direct"].appliedTo
2305 _HmX = None, # Simulation of Hm(x) already done
2306 _arg = None, # Extra arguments for Hm, as a tuple
2311 _SIV = False, # To be removed for 8.0
2312 _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
2313 _nPS = 0, # nbPreviousSteps
2314 _QM = "DA", # QualityMeasure
2315 _SSV = {}, # Input and/or output : self.StoredVariables
2316 _fRt = False, # Whether or not to return the extended output
2317 _sSc = True, # Whether or not to store the SSC
2320 General cost function useful for the static/3D algorithms: 3DVAR, BLUE
2321 and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO, SA, Tabu,
2322 DFO, QuantileRegression
# Storage containers in _SSV get an "append" alias so both Persistence
# objects (store) and plain lists (append) can be driven uniformly.
2328 for k in ["CostFunctionJ",
2334 "SimulatedObservationAtCurrentOptimum",
2335 "SimulatedObservationAtCurrentState",
2339 if hasattr(_SSV[k],"store"):
2340 _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
# Current state as a column matrix.
2342 _X = numpy.asmatrix(numpy.ravel( _x )).T
2343 if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
2344 _SSV["CurrentState"].append( _X )
# Observation simulation: reuse a precomputed _HmX, else call _Hm.
2346 if _HmX is not None:
2350 raise ValueError("COSTFUNCTION3D Operator has to be defined.")
2354 _HX = _Hm( _X, *_arg )
2355 _HX = numpy.asmatrix(numpy.ravel( _HX )).T
2357 if "SimulatedObservationAtCurrentState" in _SSC or \
2358 "SimulatedObservationAtCurrentOptimum" in _SSC:
2359 _SSV["SimulatedObservationAtCurrentState"].append( _HX )
# A NaN simulation makes the whole cost undefined.
2361 if numpy.any(numpy.isnan(_HX)):
2362 Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
2364 _Y = numpy.asmatrix(numpy.ravel( _Y )).T
# Quality measure selection: Jb is the background term, Jo the
# observation term; J = Jb + Jo.
2365 if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
2366 if _BI is None or _RI is None:
2367 raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
2368 _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
2369 Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
2370 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2371 elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
2373 raise ValueError("Observation error covariance matrix has to be properly defined!")
2375 Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
2376 elif _QM in ["LeastSquares", "LS", "L2"]:
2378 Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
2379 elif _QM in ["AbsoluteValue", "L1"]:
2381 Jo = numpy.sum( numpy.abs(_Y - _HX) )
2382 elif _QM in ["MaximumError", "ME"]:
2384 Jo = numpy.max( numpy.abs(_Y - _HX) )
2385 elif _QM in ["QR", "Null"]:
2389 raise ValueError("Unknown asked quality measure!")
2391 J = float( Jb ) + float( Jo )
# Store the three cost components.
2394 _SSV["CostFunctionJb"].append( Jb )
2395 _SSV["CostFunctionJo"].append( Jo )
2396 _SSV["CostFunctionJ" ].append( J )
# Track the optimum over the cost history, skipping _nPS previous steps.
2398 if "IndexOfOptimum" in _SSC or \
2399 "CurrentOptimum" in _SSC or \
2400 "SimulatedObservationAtCurrentOptimum" in _SSC:
2401 IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
2402 if "IndexOfOptimum" in _SSC:
2403 _SSV["IndexOfOptimum"].append( IndexMin )
2404 if "CurrentOptimum" in _SSC:
2405 _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
2406 if "SimulatedObservationAtCurrentOptimum" in _SSC:
2407 _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
2412 if _QM in ["QR"]: # For QuantileRegression
2417 # ==============================================================================
2418 if __name__ == "__main__":
# Self-test entry point: only announces the autodiagnostic run.
2419 print('\n AUTODIAGNOSTIC\n')