1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 Ce module est destiné à être appelé par AssimilationStudy.
28 __author__ = "Jean-Philippe ARGAUD"
import os
import copy
import logging
import numpy

from daCore import Persistence
from daCore import PlatformInfo
from daCore import Interfaces
from daCore import Templates
from daCore.Interfaces import ImportFromScript
42 # ==============================================================================
class CacheManager(object):
    """
    General management class for a cache of calculation results.

    For each evaluated point X it stores the tuple (X, H(X), ||X||), so that
    a later evaluation at a point closer to a stored X than a relative
    tolerance can be short-circuited and served from the cache.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                ):
        """
        Initialize the cache; the tolerance characteristics can be modified
        at creation.

        toleranceInRedundancy : relative tolerance used to declare two points
            identical (the distance is compared to tolerance * ||X_stored||).
        lenghtOfRedundancy : maximum number of cached entries; a negative
            value means "choose automatically" (2 * X.size + 2) on first
            storage. (The historical misspelling "lenght" is kept because the
            keyword is part of the public interface.)
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Remember the initial length so that enable() can restore it after
        # a disable() set the effective length to 0.
        self.__initlnOR = self.__lenghtOR
        self.clearCache()

    def clearCache(self):
        "Empty the cache of all previously calculated points."
        # Operator Previous Calculated Points, Results, Point Norms
        self.__listOPCV = []

    def wasCalculatedIn(self, xValue ):
        """
        Check whether a previous calculation exists for the value xValue.

        Returns (True, H(X)) when a stored point lies closer to xValue than
        tolerance * ||X_stored||, and (False, None) otherwise. The most
        recently stored points are inspected first.
        """
        __alreadyCalculated = False
        __HxV = None
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # Different sizes: can not be the same point
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alreadyCalculated = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue ):
        """
        Store the result HxValue of a calculation at the point xValue.

        On first use with an automatic (negative) length, the maximum cache
        size is fixed from the point size; the oldest entries are discarded
        when the cache exceeds that size.
        """
        if self.__lenghtOR < 0:
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entry to respect the maximum cache length
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            HxValue,
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Disable the cache (keeping the current length for a later enable())."
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Re-enable the cache with the length it had before disable()."
        self.__lenghtOR = self.__initlnOR
102 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, defined either by a
    Python function/method or by a matrix. Evaluations may be served from a
    shared cache to avoid redundant calculations.
    """
    # Global (class-wide) evaluation counters, shared by all Operator
    # instances; each instance also keeps its own local counters.
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Shared cache of already calculated points
    CM = CacheManager()

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build the operator from one of the two exclusive keywords:

        - fromMethod : a Python function
        - fromMatrix : anything suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether redundant calculations are avoided
          through the shared cache
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # Fix: "numpy.float" (a deprecated alias removed in NumPy 1.20+)
            # replaced by the builtin float, which is what it aliased.
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Disable the shared calculation cache."
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Re-enable the shared calculation cache if this operator allows it."
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None):
        """
        Return the result of applying the operator to xValue, which must
        already be of the proper type.

        - xValue : argument suitable for applying the operator
        - HValue : optional precomputed result H(xValue); when given it is
          stored in the cache instead of being recomputed
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            if self.__AvoidRC:
                Operator.CM.storeValueInX(xValue,HxValue)
        else:
            if self.__AvoidRC:
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            else:
                __alreadyCalculated = False
            #
            if __alreadyCalculated:
                self.__addOneCacheCall()
                HxValue = __HxV
            else:
                if self.__Matrix is not None:
                    self.__addOneMatrixCall()
                    HxValue = self.__Matrix * xValue
                else:
                    self.__addOneMethodCall()
                    HxValue = self.__Method( xValue )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(xValue,HxValue)
        #
        return HxValue

    def appliedControledFormTo(self, paire ):
        """
        Return the result of applying the operator to a pair
        (xValue, uValue). When uValue is None, the operator is assumed to
        apply to xValue only.

        - xValue : state argument X suitable for the operator
        - uValue : control argument U suitable for the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        else:
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        """
        Return the result of applying the operator at the point xValue,
        knowing that the operator was built at xNominal. For a linear
        (matrix) operator the nominal point is irrelevant.

        Argument: a pair containing
        - xNominal : the point at which the operator was built
        - xValue : argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        else:
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        """
        Return the operator in matrix form; a functional operator requires
        an operating point ValueForMethodForm (do not use None as sentinel).
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix
        elif ValueForMethodForm != "UnknownVoidValue":
            # Fix: the original compared identity ("is not") against a str
            # literal, which is fragile and a SyntaxWarning; equality is the
            # intended sentinel test.
            self.__addOneMethodCall()
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    def shape(self):
        """
        Return the numpy shape when the operator is available in matrix
        form; raise otherwise.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the evaluation counts of the operator as a tuple
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or the single entry selected by the index "which".
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else: return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Record one evaluation through the matrix form."
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self):
        "Record one evaluation through the function form."
        self.__NbCallsAsMethod += 1 # Local count
        Operator.NbCallsAsMethod += 1 # Global count

    def __addOneCacheCall(self):
        "Record one evaluation served from the cache."
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
283 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a complete operator
    (Direct, Linear Tangent, Adjoint).
    """
    def __init__(self,
                 name             = "GenericFullOperator",
                 asMatrix         = None,  # Matrix
                 asOneFunction    = None,  # Function
                 asThreeFunctions = None,  # Functions dictionary
                 asScript         = None,  # Function(s) script
                 asDict           = None,  # Parameters
                 appliedInX       = None,
                 avoidRC          = True,
                 scheduledBy      = None,
                 toBeChecked      = False,
                 ):
        """
        Build the Direct/Tangent/Adjoint operators from one of:

        - asMatrix : a matrix, used for the three forms
        - asOneFunction : a single Direct function (derivatives then
          approximated by finite differences)
        - asThreeFunctions : a dictionary with "Direct", "Tangent", "Adjoint"
        - asScript : a script providing the matrix or the function(s)

        asDict carries user parameters (finite-difference increment, centered
        differences, multiprocessing); appliedInX optionally gives
        precomputed values of the operator at given points.

        NOTE(review): the keyword order of this signature was reconstructed
        from a truncated original — callers should pass keywords only.
        """
        self.__name  = str(name)
        self.__check = bool(toBeChecked)
        #
        self.__FO = {}
        #
        __Parameters = {}
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            # Map the user-facing keys to the internal "with*" parameter names
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
        #
        if asScript is not None:
            # The matrix or the function(s) are read from a user script
            __Matrix, __Function = None, None
            if asMatrix:
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            elif asOneFunction:
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                __Function = {
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                    }
                __Function.update(__Parameters)
        else:
            __Matrix = asMatrix
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                    else:
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                else:
                    __Function = { "Direct":asOneFunction }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                if isinstance(asThreeFunctions, dict) and \
                        ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                        ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                        (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                        ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                else:
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
            else:
                __Function = None
        #
        # Normalize the optional precomputed values into a dictionary
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        else:
            __appliedInX = None
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        if isinstance(__Function, dict) and \
                ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
                ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Derivatives approximated by finite differences around Direct
            if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
            if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
            if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["withCenteredDF"],
                increment = __Function["withIncrement"],
                dX = __Function["withdX"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["withmpEnabled"],
                mpWorkers = __Function["withmpWorkers"],
                )
            self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
        elif isinstance(__Function, dict) and \
                ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
                (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
        elif asMatrix is not None:
            # Fix: "numpy.float" (removed in NumPy 1.20+) replaced by float
            __matrice = numpy.matrix( __Matrix, float )
            self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
        else:
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            if not isinstance(__appliedInX, dict):
                raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # A true matrix: flatten then store as a column vector
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # A vector given as a 2D array: reshape to 1D, store as column
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), float ).T
                else:
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], float ).T
        else:
            self.__FO["AppliedInX"] = None

    def getO(self):
        "Return the dictionary of elementary operators (Direct/Tangent/Adjoint)."
        return self.__FO

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        # Fix: the original returned repr(self.__V), but no __V attribute is
        # ever assigned on FullOperator, causing an AttributeError.
        return repr(self.__FO)

    def __str__(self):
        "x.__str__() <==> str(x)"
        return str(self.__FO)
448 # ==============================================================================
class Algorithm(object):
    """
    General interface class for an assimilation algorithm.

    It provides a framework for writing an elementary assimilation algorithm
    class, supplying a container (dictionary) of initialized persistent
    variables and accessor methods for these stored variables.

    An elementary algorithm class has to implement the "run" method.
    """
    def __init__(self, name):
        """
        Build the generic storage variables made available to elementary
        algorithms. They are kept in an internal dictionary and accessed
        through the "get" method.

        The provided variables are:
            - CostFunctionJ / CostFunctionJb / CostFunctionJo : global cost
              function and its background and observation parts
            - GradientOfCostFunctionJ / Jb / Jo : gradients of the above
            - CurrentState : current state during iterations
            - Analysis : the analysis Xa
            - SimulatedObservationAtBackground / AtCurrentState / AtOptimum :
              the observed state H(X) at Xb, at the current state, at Xa
            - Innovation / InnovationAtCurrentState : d = Y - H(X)
            - SigmaObs2 / SigmaBck2 : optimal correction indicators for
              observation and background errors
            - MahalanobisConsistency : covariance consistency indicator
            - OMA / OMB / BMA : Observation minus Analysis, Observation minus
              Background, Background minus Analysis
            - APosterioriCovariance / Variances / StandardDeviations /
              Correlations : the a posteriori matrix A and derived quantities
            - SimulationQuantiles : quantiles of simulated observations
            - Residu : for checking algorithms
        Additional variables can be registered in the initialization of the
        elementary algorithm inheriting from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        #
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        #
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")

    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
        "Pre-processing performed before the algorithm run."
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        #
        # Update self._parameters with the given Parameters
        self.__setParameters(Parameters)
        #
        # Checks and completions on the inputs
        def __test_vvalue( argument, variable, argname):
            "Check an input vector against the declared input requirements."
            if argument is None:
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
                else:
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
            else:
                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        def __test_cvalue( argument, variable, argname):
            "Check an input covariance matrix against the declared requirements."
            if argument is None:
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
                else:
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
            else:
                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        #
        # Normalize the optional "Bounds" parameter
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        else:
            self._parameters["Bounds"] = None
        #
        # Verbosity options for the optimizers, driven by the logging level
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            else:
                self._parameters["optmessages"] = 15
        else:
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
            else:
                self._parameters["optmessages"] = 15
        #
        return 0

    def _post_run(self,_oH=None):
        "Post-processing performed after the algorithm run."
        # Derive variances/standard deviations/correlations from each stored
        # a posteriori covariance matrix, when requested
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        if _oH is not None:
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
        return 0

    def get(self, key=None):
        """
        Return the stored variable identified by the key, or the whole
        dictionary of available variables when no key is given. The variables
        are returned as persistence objects, so individual access methods are
        those of the persistence classes.
        """
        if key is not None:
            return self.StoredVariables[key]
        else:
            return self.StoredVariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.StoredVariables

    def keys(self):
        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()
        else:
            return []

    def pop(self, k, d):
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        else:
            # NOTE(review): fallback branch reconstructed from a truncated
            # original — mimics dict.pop error behavior without storage.
            try:
                msg = "'%s'"%k
            except Exception:
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"
            try:
                return d
            except Exception:
                raise KeyError(msg)

    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Has to implement the elementary assimilation calculation, in its most
        natural mathematical form. Must be overridden by subclasses.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")

    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare a required parameter of the algorithm together with its
        default characteristics (default value, typecast, bounds, allowed
        values, description message).
        """
        if name is None:
            raise ValueError("A name is mandatory to define a required parameter.")
        #
        self.__required_parameters[name] = {
            "default"  : default,
            "typecast" : typecast,
            "minval"   : minval,
            "maxval"   : maxval,
            "listval"  : listval,
            "message"  : message,
            }
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))

    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required-parameter names, or the full
        dictionary of required parameters when noDetails is False.
        """
        if noDetails:
            return sorted(self.__required_parameters.keys())
        else:
            return self.__required_parameters

    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way:
        apply the declared typecast, then check min/max bounds and the list
        of allowed values, raising ValueError on violation.
        """
        default  = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval   = self.__required_parameters[name]["minval"]
        maxval   = self.__required_parameters[name]["maxval"]
        listval  = self.__required_parameters[name]["listval"]
        #
        if value is None and default is None:
            __val = None
        elif value is None and default is not None:
            if typecast is None: __val = default
            else:                __val = typecast( default )
        else:
            if typecast is None: __val = value
            else:                __val = typecast( value )
        #
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                for v in __val:
                    if v not in listval:
                        raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
        return __val

    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Declare the mandatory and optional input arguments required by the
        algorithm (checked later by _pre_run).
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )

    def __setParameters(self, fromDico={}):
        """
        Store the received parameters in the internal dictionary, applying
        the controlled-value logic to every declared required parameter.
        (fromDico is only read, never mutated, so the mutable default is safe.)
        """
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
            else:
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
737 # ==============================================================================
738 class AlgorithmAndParameters(object):
740 Classe générale d'interface d'action pour l'algorithme et ses paramètres
743 name = "GenericAlgorithm",
750 self.__name = str(name)
754 self.__algorithm = {}
755 self.__algorithmFile = None
756 self.__algorithmName = None
758 self.updateParameters( asDict, asScript )
760 if asAlgorithm is None and asScript is not None:
761 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
765 if __Algo is not None:
766 self.__A = str(__Algo)
767 self.__P.update( {"Algorithm":self.__A} )
769 self.__setAlgorithm( self.__A )
771 def updateParameters(self,
775 "Mise a jour des parametres"
776 if asDict is None and asScript is not None:
777 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
781 if __Dict is not None:
782 self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Allows to launch the assimilation calculation"
        # Reset the operator-evaluation cache before a new run
        Operator.CM.clearCache()
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input: project wrapper objects expose their content
        # through a getO() accessor; raw values are taken as-is.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        # Error covariance inputs are stored without unwrapping
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        # Validate all shapes for mutual consistency before running
        self.__shape_validate()
        self.__algorithm.run(
            # NOTE(review): the other keyword arguments of run() (Xb, Y, U,
            # HO, EM, CM, R, B, Q, ...) are missing from this excerpt.
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Allows to launch the assimilation calculation through a YACS scheme"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        # The SALOME/YACS/ADAO environment must already be loaded
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the imports of pilot, SALOMERuntime and loader
        # (YACS Python API modules) are missing from this excerpt.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Register the "proc" catalog of the schema file itself
        # NOTE(review): the "try:" opening this catalog block is missing here.
        catalogAd = r.loadCatalog("proc", os.path.abspath(FileName))
        r.addCatalog(catalogAd)
        # Load the XML schema
        # NOTE(review): the "try:" matching the following "except" is missing here.
        p = xmlLoader.load(os.path.abspath(FileName))
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): the validity guard (presumably "if not p.isValid():")
        # is missing from this excerpt.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
        # Check link consistency without stopping on the first problem
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        # NOTE(review): the execution call (presumably "e.RunW(p)") is
        # missing from this excerpt.
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # Algorithm stored variables take precedence over parameters
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            # NOTE(review): the return of self.__P[key] (and the no-key
            # fallback branch) is missing from this excerpt.
        "Needed for pickling"
        # NOTE(review): the "def pop(self, k, d):" line introducing this
        # method body is missing from this excerpt.
        return self.__algorithm.pop(k, d)
    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Return the list of parameters required according to the algorithm"
        return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer callable __O (with info __I, scheduler __S)
        # to the algorithm stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            # NOTE(review): the other keyword arguments of setDataObserver
            # (presumably Scheduler = __S, HookFunction = __O) are missing
            # from this excerpt.
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer callable __O from the stored variable named __V
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            # NOTE(review): the keyword arguments of removeDataObserver
            # (presumably HookFunction = __O, AllObservers = __A) are missing
            # from this excerpt.
    def hasObserver(self, __V):
        # Tell whether an observer is attached to the stored variable __V
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            # NOTE(review): the "return False" of this guard is missing
            # from this excerpt.
        if __V not in self.__algorithm:
            # NOTE(review): a "return False" is presumably missing here too.
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        # NOTE(review): the "def keys(self):" line introducing the following
        # statement is missing from this excerpt.
        return list(self.__algorithm.keys()) + list(self.__P.keys())
931 def __contains__(self, key=None):
932 "D.__contains__(k) -> True if D has a key k, else False"
933 return key in self.__algorithm or key in self.__P
        "x.__repr__() <==> repr(x)"
        # NOTE(review): the "def __repr__(self):" line is missing from this excerpt.
        return repr(self.__A)+", "+repr(self.__P)
        "x.__str__() <==> str(x)"
        # NOTE(review): the "def __str__(self):" line is missing from this excerpt.
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm to use to carry out the assimilation study. The
        argument is a character string referring to the name of an algorithm
        performing the operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" guard line is missing from
        # this excerpt.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete file in the Python search path
        # ------------------------------------------
        # NOTE(review): a "module_path = None" initialisation is presumably
        # missing from this excerpt (required by the guard below).
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
        # NOTE(review): the "try:" matching the "except ImportError" below is
        # missing from this excerpt.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        # Restore the search path whatever happened during the import
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type from the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
981 def __shape_validate(self):
983 Validation de la correspondance correcte des tailles des variables et
984 des matrices s'il y en a.
986 if self.__Xb is None: __Xb_shape = (0,)
987 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
988 elif hasattr(self.__Xb,"shape"):
989 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
990 else: __Xb_shape = self.__Xb.shape()
991 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
993 if self.__Y is None: __Y_shape = (0,)
994 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
995 elif hasattr(self.__Y,"shape"):
996 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
997 else: __Y_shape = self.__Y.shape()
998 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1000 if self.__U is None: __U_shape = (0,)
1001 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1002 elif hasattr(self.__U,"shape"):
1003 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1004 else: __U_shape = self.__U.shape()
1005 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1007 if self.__B is None: __B_shape = (0,0)
1008 elif hasattr(self.__B,"shape"):
1009 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1010 else: __B_shape = self.__B.shape()
1011 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1013 if self.__R is None: __R_shape = (0,0)
1014 elif hasattr(self.__R,"shape"):
1015 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1016 else: __R_shape = self.__R.shape()
1017 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1019 if self.__Q is None: __Q_shape = (0,0)
1020 elif hasattr(self.__Q,"shape"):
1021 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1022 else: __Q_shape = self.__Q.shape()
1023 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1025 if len(self.__HO) == 0: __HO_shape = (0,0)
1026 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1027 elif hasattr(self.__HO["Direct"],"shape"):
1028 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1029 else: __HO_shape = self.__HO["Direct"].shape()
1030 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1032 if len(self.__EM) == 0: __EM_shape = (0,0)
1033 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1034 elif hasattr(self.__EM["Direct"],"shape"):
1035 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1036 else: __EM_shape = self.__EM["Direct"].shape()
1037 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1039 if len(self.__CM) == 0: __CM_shape = (0,0)
1040 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1041 elif hasattr(self.__CM["Direct"],"shape"):
1042 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1043 else: __CM_shape = self.__CM["Direct"].shape()
1044 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1046 # Vérification des conditions
1047 # ---------------------------
1048 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1049 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1050 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1051 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1053 if not( min(__B_shape) == max(__B_shape) ):
1054 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1055 if not( min(__R_shape) == max(__R_shape) ):
1056 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1057 if not( min(__Q_shape) == max(__Q_shape) ):
1058 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1059 if not( min(__EM_shape) == max(__EM_shape) ):
1060 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1062 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1063 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1064 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1065 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1066 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1067 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1068 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1069 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1071 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1072 if self.__algorithmName in ["EnsembleBlue",]:
1073 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1074 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1075 for member in asPersistentVector:
1076 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1077 __Xb_shape = min(__B_shape)
1079 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1081 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1082 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1084 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1085 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1087 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1088 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1090 if ("Bounds" in self.__P) \
1091 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1092 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1093 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1094 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1098 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class of action for the regulation and its parameters
    """
    # NOTE(review): the "def __init__(self, ..." signature lines (with
    # asDict, asScript, asAlgorithm parameters) are missing from this excerpt.
        name = "GenericRegulation",
        self.__name = str(name)
        # NOTE(review): initialisations such as "self.__P = {}" and
        # "self.__A = None" are presumably missing here.
        # Resolve the algorithm name, from script or explicit argument
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else:" line of this branch is missing here.
            __Algo = asAlgorithm
        # Resolve the parameter dictionary, from script or explicit argument
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): an "else: __Dict = asDict" branch is presumably missing here.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            # NOTE(review): self.__A is presumably assigned from __Algo in a
            # missing line before being stored in the parameters.
            self.__P.update( {"Algorithm":self.__A} )
    def get(self, key = None):
        "Check the existence of a variable or parameter key"
        # NOTE(review): the "if key in self.__P:" guard (and the no-key
        # fallback) is missing from this excerpt.
            return self.__P[key]
1137 # ==============================================================================
class DataObserver(object):
    """
    General interface class of observer type
    """
    # NOTE(review): the "def __init__(self, ..." signature lines (name,
    # onVariable, asTemplate, asString, asScript, asObsObject, withInfo,
    # scheduledBy, withAlgo, ...) are missing from this excerpt.
        name = "GenericObserver",
        self.__name = str(name)
        # Normalise the observed variable name(s) and their info strings
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # NOTE(review): the default info assignment for the list case
                # is missing from this excerpt.
            self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): the "else:" line of this branch is missing here.
                self.__I = (str(withInfo),)
        # NOTE(review): the "else:" line of this branch is missing here.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        # Resolve the observer function text by decreasing priority:
        # explicit string, named template, then script file
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        # NOTE(review): an "else:" fallback is presumably missing here.
        __Function = ObserverF(__FunctionText)
        # An explicit observer object takes precedence over the built function
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): the "else:" line of this branch is missing here.
            self.__O = __Function.getfunc()
        # Register the observer on every requested variable of the algorithm
        for k in range(len(self.__V)):
            # NOTE(review): lines unpacking ename/einfo from self.__V/self.__I
            # are missing from this excerpt.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): the "else:" line of this branch is missing here.
            withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
        "x.__repr__() <==> repr(x)"
        # NOTE(review): the "def __repr__(self):" line is missing from this excerpt.
        return repr(self.__V)+"\n"+repr(self.__O)
        "x.__str__() <==> str(x)"
        # NOTE(review): the "def __str__(self):" line is missing from this excerpt.
        return str(self.__V)+"\n"+str(self.__O)
1208 # ==============================================================================
class State(object):
    """
    General interface class of state type
    """
    # NOTE(review): the "def __init__(self, ..." signature lines (including
    # "asVector = None," and "asScript"/"scheduledBy" parameters) are partly
    # missing from this excerpt.
        name = "GenericVector",
        asPersistentVector = None,
        toBeChecked = False,
        """
        Allows to define a vector:
        - asVector: data input, as a vector compatible with the numpy.matrix
          constructor, or "True" if input by script.
        - asPersistentVector: data input, as a series of vectors compatible
          with the numpy.matrix constructor, or as a Persistence-type object,
          or "True" if input by script.
        - asScript: if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (by default) or
          "asPersistentVector" depending on which of these variables is set.
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): initialisations such as "self.__V = None" and
        # "self.__T = None" are presumably missing from this excerpt.
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): the "else:" line of this branch is missing here.
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): the "else:" line of this branch is missing here.
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): numpy.float is a deprecated alias removed in
            # NumPy 1.24 -- should be the builtin float. Flagged, not changed.
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # SECURITY NOTE(review): eval() on a user-supplied string --
                # arbitrary code execution if the input is untrusted.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
                import sys ; sys.stdout.flush()
            # NOTE(review): the "else:" branch (Persistence object taken
            # as-is) is missing from this excerpt.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): the "else:" line of this branch is missing here.
                self.shape = self.__V.shape()
            # Normalise a 1-D shape into a column (n,1) shape
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): the "else:" line of this branch is missing here.
            raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
    def getO(self, withScheduler=False):
        # Return the stored value, optionally together with its scheduler
        # NOTE(review): the "if withScheduler:" guard line is missing here.
        return self.__V, self.__T
        elif self.__T is None:
        # NOTE(review): the remaining branches of getO (and the "def
        # isvector(self):"/"def isseries(self):"/dunder "def" lines below)
        # are missing from this excerpt.
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_series
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
        "x.__str__() <==> str(x)"
        return str(self.__V)
1302 # ==============================================================================
class Covariance(object):
    """
    General interface class of covariance type
    """
    # NOTE(review): the "def __init__(self, ..." line and part of the
    # signature (asCovObject, asScript parameters) are missing from this
    # excerpt.
        name = "GenericCovariance",
        asCovariance = None,
        asEyeByScalar = None,
        asEyeByVector = None,
        toBeChecked = False,
        """
        Allows to define a covariance:
        - asCovariance: data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar: data input as a single variance scalar, multiplying
          an identity correlation matrix, so no matrix has to be given
          explicitly
        - asEyeByVector: data input as a single variance vector, to put on
          the diagonal of a correlation matrix, so no matrix has to be given
          explicitly
        - asCovObject: data input as a python object, which has the mandatory
          methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
          "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
          "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__",
          "__str__"
        - toBeChecked: boolean indicating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): an initialisation such as "self.__C = None" is
        # presumably missing from this excerpt.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        # Resolve the data source: script variable or explicit arguments
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            # NOTE(review): the "if asEyeByScalar:" line is missing here.
            __Scalar = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): the "elif asEyeByVector:" line is missing here.
            __Vector = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): the "elif asCovObject:" line is missing here.
            __Object = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): the "else:" line is missing here.
            __Matrix = ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): the "else:" line of this branch is missing here.
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        # Store the covariance under the lightest adequate representation
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # NOTE(review): the shape/size assignments of the scalar case are
            # missing from this excerpt.
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # NOTE(review): the "self.__C = __Object" assignment line is
            # missing from this excerpt.
            # The object must provide the mandatory covariance protocol
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            # NOTE(review): the "else:" fallback of the shape lookup is
            # missing from this excerpt.
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            # NOTE(review): the "else:" fallback, the final "else:" branch of
            # the dispatch, and the closing call to self.__validate() are
            # missing from this excerpt.
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        "Validation of the stored covariance"
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        # Square-shape checks for explicit matrices and covariance objects
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        # Positivity checks for the sparse representations
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # SPD check via Cholesky, only when requested or when logging is verbose
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): the "try:" / "except" lines wrapping this
            # Cholesky attempt are missing from this excerpt -- the raise
            # below presumably belongs to the except branch.
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): same missing try/except structure as above.
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        "Internal type check"
        # NOTE(review): the "def isscalar(self):" line is missing from this excerpt.
        return self.__is_scalar
        "Internal type check"
        # NOTE(review): the "def isvector(self):" line is missing from this excerpt.
        return self.__is_vector
        "Internal type check"
        # NOTE(review): the "def ismatrix(self):" line is missing from this excerpt.
        return self.__is_matrix
        "Internal type check"
        # NOTE(review): the "def isobject(self):" line is missing from this excerpt.
        return self.__is_object
        # NOTE(review): the "def getI(self):" line, its docstring and the
        # leading "if self.ismatrix():" branch are missing from this excerpt.
        return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        # NOTE(review): the "else:" line of this branch is missing here.
            return None # Indispensable
        # NOTE(review): the "def getT(self):" line, its docstring and the
        # leading "if self.ismatrix():" branch are missing from this excerpt.
        return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            # Diagonal and scalar covariances are symmetric: transpose is itself
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
        "Cholesky decomposition"
        # NOTE(review): the "def cholesky(self):" line and the leading
        # "if self.ismatrix():" branch are missing from this excerpt.
        return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            # For diagonal/scalar forms the Cholesky factor is the square root
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
    def choleskyI(self):
        "Inversion of the Cholesky decomposition"
        # NOTE(review): the leading "if self.ismatrix():" line is missing
        # from this excerpt.
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" line is missing
        # from this excerpt.
        return numpy.diag(self.__C)
        elif self.isvector():
            # NOTE(review): the "return self.__C" of the vector case is
            # presumably missing here.
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" guard line is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): the "else:" line of this branch is missing here.
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()
    def asfullmatrix(self, msize=None):
        # Expand the stored representation into an explicit full matrix.
        # NOTE(review): the docstring and the "if self.ismatrix():" branch
        # returning self.__C are missing from this excerpt.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" guard line is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): the "else:" line of this branch is missing here.
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()
    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" line is missing here.
        return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" guard line is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): the "else:" line of this branch is missing here.
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()
        "x.__repr__() <==> repr(x)"
        # NOTE(review): the "def __repr__(self):" line is missing from this excerpt.
        return repr(self.__C)
        "x.__str__() <==> str(x)"
        # NOTE(review): the "def __str__(self):" line is missing from this excerpt.
        return str(self.__C)
    def __add__(self, other):
        "x.__add__(y) <==> x+y"
        if self.ismatrix() or self.isobject():
            return self.__C + numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            # Add the (diagonal) covariance onto the diagonal of "other":
            # the strided view [::shape[1]+1] addresses the diagonal elements.
            # NOTE(review): numpy.asarray may alias the caller's buffer, so
            # the "+=" below can mutate "other" in place -- confirm this is
            # intended. Also assumes "other" is 2-D (uses _A.shape[1]).
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
            return numpy.asmatrix(_A)
    def __radd__(self, other):
        "x.__radd__(y) <==> y+x"
        # Right-addition is deliberately unsupported
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
    def __sub__(self, other):
        "x.__sub__(y) <==> x-y"
        if self.ismatrix() or self.isobject():
            return self.__C - numpy.asmatrix(other)
        elif self.isvector() or self.isscalar():
            # Same diagonal-view technique (and the same potential in-place
            # mutation of "other") as in __add__ above.
            _A = numpy.asarray(other)
            _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
            return numpy.asmatrix(_A)
    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x"
        # Right-subtraction is deliberately unsupported
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
        "x.__neg__() <==> -x"
        # NOTE(review): the "def __neg__(self):" line and its return
        # statement are missing from this excerpt.
    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Dispatch on the internal representation and the type/shape of "other"
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
            # NOTE(review): the "else:" line of this branch is missing here.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            # NOTE(review): the "else:" line of this branch is missing here.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            # NOTE(review): the "else:" line of this branch is missing here.
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        # NOTE(review): the "else:" line of this branch is missing here.
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Mirror of __mul__ for the left-operand cases
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            # NOTE(review): the "else:" line of this branch is missing here.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        # NOTE(review): the "else:" line of this branch is missing here.
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
        "x.__len__() <==> len(x)"
        # NOTE(review): the "def __len__(self):" line is missing from this excerpt.
        return self.shape[0]
1612 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text.

    The text is stored verbatim and executed each time the observer fires,
    with the observed variable available as "var" and its info as "info".
    """
    def __init__(self, corps=""):
        # Raw Python source of the observer body
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        # SECURITY NOTE(review): executes arbitrary user-supplied code; the
        # observer text must come from a trusted source (templates, user
        # scripts of the study).
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by the object"
        return self.func
1626 # ==============================================================================
class CaseLogger(object):
    """
    Keeping of the creation commands of a case
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        # Ordered record of (command, keys, locals, pre, switchoff) tuples
        self.__logSerie = []
        self.__switchoff = False
        # NOTE(review): the "self.__viewers = {" opening line is missing
        # from this excerpt.
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
        # NOTE(review): the closing "}" and the "self.__loaders = {" opening
        # line are missing from this excerpt.
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        # NOTE(review): the closing "}" line is missing from this excerpt.
        # User-supplied formatters extend the built-in ones
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Recording of an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # NOTE(review): the "if __switchoff:" guard line is missing here.
            self.__switchoff = True
        # NOTE(review): the "if not __switchoff:" guard line is missing here.
        self.__switchoff = False
    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalised restitution of the commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        # NOTE(review): the "else:" line of this branch is missing here.
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)
    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalised loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        # NOTE(review): the "else:" line of this branch is missing here.
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1676 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,   # Simulation of Hm(x) already computed
                   _arg = None,   # Extra arguments for Hm, given as a tuple
                   _BI  = None,   # Inverse of the background error covariance B
                   _RI  = None,   # Inverse of the observation error covariance R
                   _Xb  = None,   # Background state
                   _Y   = None,   # Observation vector
                   _SIV = False,  # To be removed for 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # Input and/or output : self.StoredVariables
                   _fRt = False,  # Whether to return the extended output
                   _sSc = True,   # Whether to store the SSC
                   ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J = Jb + Jo for the requested quality measure
    _QM (or the simulated observation for "QR", or the whole storage
    dictionary _SSV when _fRt is True). Raises ValueError when a required
    operator or covariance matrix is missing, or for an unknown _QM.
    """
    # Build fresh containers at each call: the former mutable defaults
    # (_SSC=[], _SSV={}) were shared between calls and, since _SSV is
    # mutated below, silently accumulated state across calls.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every used storage slot exists and supports "append"
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Simulation: reuse an already computed Hm(x) when given, else apply Hm
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates as NaN costs
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb = 0.
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb = 0.
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            # Quality measure carried by the simulation itself
            Jb = 0.
            Jo = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    # Optimum tracking over the already stored costs, ignoring the _nPS
    # first steps of a possibly warm-started serie
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1791 # ==============================================================================
# Minimal self-test banner when the module is executed directly
if __name__ == "__main__":
    print("\n AUTODIAGNOSTIC \n")