1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from daCore import Persistence
35 from daCore import PlatformInfo
36 from daCore import Interfaces
37 from daCore import Templates
38 from daCore.Interfaces import ImportFromScript, ImportFromFile
40 # ==============================================================================
class CacheManager(object):
    """
    General cache manager for operator evaluations.

    Keeps previously computed (point, result, norm) triplets so that a new
    evaluation at an already-seen point can be served from the cache instead
    of being recomputed.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,  # historical spelling kept: it is part of the public interface
                ):
        """
        Tolerance characteristics can be modified at creation.

        - toleranceInRedundancy : relative tolerance used to decide that two
          points are duplicates of each other
        - lenghtOfRedundancy : maximum number of cached entries; a negative
          value means "auto-size on the first stored point"
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR
        self.clearCache()

    def clearCache(self):
        "Empty the cache"
        # Operator Previous Calculated Points, Results, Point Norms
        self.__listOPCV = []

    def wasCalculatedIn(self, xValue ):
        """
        Check whether a result was already computed for xValue.

        Returns (found, cachedResult): found is True and cachedResult holds
        the stored evaluation when a point closer than the relative tolerance
        exists in the cache; otherwise (False, None).
        """
        __alc = False
        __HxV = None
        # Scan from the most recent entry backwards, limited to __lenghtOR items
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # Different sizes can never be duplicates
                continue
            # Relative duplicate test: ||x - xi|| < tol * ||xi||
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alc = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alc, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store the computed pair (xValue, HxValue) in the cache"
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use, proportionally to the point size
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entries to respect the maximum cache length
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            HxValue,
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Disable the cache (remember the current length to restore it later)"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Re-enable the cache with its previous length"
        self.__lenghtOR = self.__initlnOR
100 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, given either as a Python
    function or as a matrix. Evaluations can be cached (redundancy avoidance)
    through a class-level CacheManager shared by all Operator instances.
    """
    # Global evaluation counters, shared by every Operator instance
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Shared cache of previous evaluations
    CM = CacheManager()

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build the operator from one of the two keywords:

        - fromMethod : a Python function
        - fromMatrix : anything accepted by the numpy.matrix constructor
        - avoidingRedundancy : whether to use the shared evaluation cache
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # "float" replaces the removed "numpy.float" alias (same dtype)
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Disable the shared evaluation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Re-enable the shared evaluation cache, if this operator allows it"
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def appliedTo(self, xValue, HValue = None):
        """
        Return the result of applying the operator to xValue. The argument is
        simply applied, so it must already be of the proper type.

        - xValue : argument suitable for the operator
        - HValue : optional precomputed value of the operator at xValue; when
          given it is only stored in the cache and returned as a column matrix
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            if self.__AvoidRC:
                Operator.CM.storeValueInX(xValue,HxValue)
        else:
            if self.__AvoidRC:
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            else:
                __alreadyCalculated = False
            #
            if __alreadyCalculated:
                # Cache hit: reuse the stored evaluation
                self.__addOneCacheCall()
                HxValue = __HxV
            else:
                if self.__Matrix is not None:
                    self.__addOneMatrixCall()
                    HxValue = self.__Matrix * xValue
                else:
                    self.__addOneMethodCall()
                    HxValue = self.__Method( xValue )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(xValue,HxValue)
        #
        return HxValue

    def appliedControledFormTo(self, paire ):
        """
        Apply the operator to a (xValue, uValue) pair. If uValue is None, the
        operator is assumed to apply to xValue only.

        - xValue : X argument suitable for the operator
        - uValue : U argument suitable for the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        else:
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        """
        Apply the operator, built at point xNominal, to xValue. When the
        operator is linear (a matrix) it is valid at any nominal point, so the
        nominal point is simply ignored.

        Argument: a pair (xNominal, xValue).
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        else:
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        """
        Return the operator in matrix form. For a function-defined operator an
        operating point ValueForMethodForm is required.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix
        # Equality test replaces the fragile identity test ("is not") on a
        # string literal; the sentinel is the exact string "UnknownVoidValue"
        # (do not use "None", a nominal point may legitimately be None-like).
        elif not (isinstance(ValueForMethodForm, str) and ValueForMethodForm == "UnknownVoidValue"):
            self.__addOneMethodCall()
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    def shape(self):
        """
        Return the numpy shape when the operator is available in matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the operator evaluation counters as the tuple (local total,
        local matrix, local method, local cached, global total, global matrix,
        global method, global cached), or a single entry when "which" gives
        its index.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None:
            return __nbcalls
        else:
            return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix evaluation"
        self.__NbCallsAsMatrix   += 1  # local counter
        Operator.NbCallsAsMatrix += 1  # global counter

    def __addOneMethodCall(self):
        "Count one method evaluation"
        self.__NbCallsAsMethod   += 1  # local counter
        Operator.NbCallsAsMethod += 1  # global counter

    def __addOneCacheCall(self):
        "Count one cache hit"
        self.__NbCallsOfCached   += 1  # local counter
        Operator.NbCallsOfCached += 1  # global counter
281 # ==============================================================================
# NOTE(review): this block is a truncated extraction — the leading integer on
# each line is the original file's line number, and the gaps in that numbering
# show that many statements are missing here (the __init__ signature defaults,
# several "else:" branches, docstring delimiters, the __FO initialization, and
# the __repr__/__str__ method headers). Do not edit from this copy; verify
# against the upstream ADAO daCore sources before any change.
282 class FullOperator(object):
284 Classe générale d'interface de type opérateur complet
285 (Direct, Linéaire Tangent, Adjoint)
288 name = "GenericFullOperator",
290 asOneFunction = None, # Fonction
291 asThreeFunctions = None, # Fonctions dictionary
292 asScript = None, # Fonction(s) script
293 asDict = None, # Parameters
300 self.__name = str(name)
301 self.__check = bool(toBeChecked)
# Normalize the user-facing parameter names into the internal "with*" keys.
306 if (asDict is not None) and isinstance(asDict, dict):
307 __Parameters.update( asDict )
308 if "DifferentialIncrement" in asDict:
309 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
310 if "CenteredFiniteDifference" in asDict:
311 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
312 if "EnableMultiProcessing" in asDict:
313 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
314 if "NumberOfProcesses" in asDict:
315 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
# Case 1: the operator(s) are provided through an external script; either a
# matrix named after the operator, or Direct/Tangent/Adjoint functions.
317 if asScript is not None:
318 __Matrix, __Function = None, None
320 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
322 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
323 __Function.update({"useApproximatedDerivatives":True})
324 __Function.update(__Parameters)
325 elif asThreeFunctions:
327 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
328 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
329 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
331 __Function.update(__Parameters)
# Case 2: a single Direct function is given; derivatives will be approximated.
334 if asOneFunction is not None:
335 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
336 if asOneFunction["Direct"] is not None:
337 __Function = asOneFunction
339 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
341 __Function = { "Direct":asOneFunction }
342 __Function.update({"useApproximatedDerivatives":True})
343 __Function.update(__Parameters)
# Case 3: Direct/Tangent/Adjoint functions given directly as a dictionary.
344 elif asThreeFunctions is not None:
345 if isinstance(asThreeFunctions, dict) and \
346 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
347 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
348 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
349 __Function = asThreeFunctions
350 elif isinstance(asThreeFunctions, dict) and \
351 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
352 __Function = asThreeFunctions
353 __Function.update({"useApproximatedDerivatives":True})
355 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
356 if "Direct" not in asThreeFunctions:
# If no Direct is supplied, the Tangent serves as the Direct operator.
357 __Function["Direct"] = asThreeFunctions["Tangent"]
358 __Function.update(__Parameters)
# Commented-out legacy Python 2 guard, kept as in the original.
362 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
363 # for k in ("Direct", "Tangent", "Adjoint"):
364 # if k in __Function and hasattr(__Function[k],"__class__"):
365 # if type(__Function[k]) is type(self.__init__):
366 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the optional "applied in X" precomputed values into a dictionary.
368 if appliedInX is not None and isinstance(appliedInX, dict):
369 __appliedInX = appliedInX
370 elif appliedInX is not None:
371 __appliedInX = {"HXb":appliedInX}
375 if scheduledBy is not None:
376 self.__T = scheduledBy
# Build the Direct/Tangent/Adjoint Operator triplet (self.__FO), choosing
# between approximated derivatives, explicit functions, or a matrix.
378 if isinstance(__Function, dict) and \
379 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
380 ("Direct" in __Function) and (__Function["Direct"] is not None):
381 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
382 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
383 if "withdX" not in __Function: __Function["withdX"] = None
384 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
385 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
386 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
387 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
388 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
389 from daNumerics.ApproximatedDerivatives import FDApproximation
390 FDA = FDApproximation(
391 Function = __Function["Direct"],
392 centeredDF = __Function["withCenteredDF"],
393 increment = __Function["withIncrement"],
394 dX = __Function["withdX"],
395 avoidingRedundancy = __Function["withAvoidingRedundancy"],
396 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
397 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
398 mpEnabled = __Function["withmpEnabled"],
399 mpWorkers = __Function["withmpWorkers"],
401 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC )
402 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
403 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
404 elif isinstance(__Function, dict) and \
405 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
406 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
407 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC )
408 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
409 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
410 elif asMatrix is not None:
# NOTE(review): "numpy.float" is a removed alias in modern NumPy — would need
# "float" upstream; kept byte-identical here.
411 __matrice = numpy.matrix( __Matrix, numpy.float )
412 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
413 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
# The adjoint of a real matrix operator is its transpose.
414 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
417 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Store optional precomputed operator values at given points, normalized to
# column matrices whatever the input container (matrix, 2D array, sequence).
419 if __appliedInX is not None:
420 self.__FO["AppliedInX"] = {}
421 if not isinstance(__appliedInX, dict):
422 raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
423 for key in list(__appliedInX.keys()):
424 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
425 # Pour le cas où l'on a une vraie matrice
426 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
427 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
428 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
429 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
431 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
433 self.__FO["AppliedInX"] = None
# repr/str helpers (their "def" headers are missing in this extraction).
439 "x.__repr__() <==> repr(x)"
440 return repr(self.__V)
443 "x.__str__() <==> str(x)"
446 # ==============================================================================
# NOTE(review): truncated extraction — the leading integer on each line is the
# original file's line number; numbering gaps show missing statements (else
# branches, docstring delimiters, the keys()/pop() method headers, and the
# return statements of get()/setParameterValue()). Do not edit from this copy;
# verify against the upstream ADAO daCore sources before any change.
447 class Algorithm(object):
449 Classe générale d'interface de type algorithme
451 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
452 d'assimilation, en fournissant un container (dictionnaire) de variables
453 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
455 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
457 def __init__(self, name):
459 L'initialisation présente permet de fabriquer des variables de stockage
460 disponibles de manière générique dans les algorithmes élémentaires. Ces
461 variables de stockage sont ensuite conservées dans un dictionnaire
462 interne à l'objet, mais auquel on accède par la méthode "get".
464 Les variables prévues sont :
465 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
466 - CostFunctionJb : partie ébauche ou background de la fonction-cout
467 - CostFunctionJo : partie observations de la fonction-cout
468 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
469 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
470 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
471 - CurrentState : état courant lors d'itérations
472 - Analysis : l'analyse Xa
473 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
474 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
475 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
476 - Innovation : l'innovation : d = Y - H(X)
477 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
478 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
479 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
480 - MahalanobisConsistency : indicateur de consistance des covariances
481 - OMA : Observation moins Analysis : Y - Xa
482 - OMB : Observation moins Background : Y - Xb
483 - AMB : Analysis moins Background : Xa - Xb
484 - APosterioriCovariance : matrice A
485 - APosterioriVariances : variances de la matrice A
486 - APosterioriStandardDeviations : écart-types de la matrice A
487 - APosterioriCorrelations : correlations de la matrice A
488 - Residu : dans le cas des algorithmes de vérification
489 On peut rajouter des variables à stocker dans l'initialisation de
490 l'algorithme élémentaire qui va hériter de cette classe
492 logging.debug("%s Initialisation", str(name))
493 self._m = PlatformInfo.SystemUsage()
495 self._name = str( name )
496 self._parameters = {"StoreSupplementaryCalculations":[]}
497 self.__required_parameters = {}
498 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
# Persistence containers for every variable the algorithms may store.
500 self.StoredVariables = {}
501 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
502 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
503 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
504 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
505 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
506 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
507 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
508 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
509 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
510 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
511 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
512 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
513 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
514 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
515 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
516 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
517 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
518 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
519 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
520 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
521 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
522 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
523 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
524 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
525 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
526 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
527 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
528 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
529 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
530 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
531 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
532 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
# _pre_run: common preamble executed by every algorithm before "run" — logs,
# parameter update, validation of input vectors/covariances, bounds handling,
# and verbosity flags for the scipy optimizers.
534 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
536 logging.debug("%s Lancement", self._name)
537 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
539 # Mise a jour de self._parameters avec Parameters
540 self.__setParameters(Parameters)
542 # Corrections et complements
# Nested helper: check a vector argument against the declared required inputs.
543 def __test_vvalue(argument, variable, argname):
545 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
546 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
547 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
548 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
550 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
552 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
554 __test_vvalue( Xb, "Xb", "Background or initial state" )
555 __test_vvalue( Y, "Y", "Observation" )
# Nested helper: same check for covariance-matrix arguments.
557 def __test_cvalue(argument, variable, argname):
559 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
560 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
561 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
562 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
564 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
566 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
568 __test_cvalue( R, "R", "Observation" )
569 __test_cvalue( B, "B", "Background" )
570 __test_cvalue( Q, "Q", "Evolution" )
# Only a non-empty list/tuple of bounds is kept; anything else is reset.
572 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
573 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
575 self._parameters["Bounds"] = None
# Verbosity for the underlying optimizers depends on the logging level.
577 if logging.getLogger().level < logging.WARNING:
578 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
579 if PlatformInfo.has_scipy:
580 import scipy.optimize
581 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
583 self._parameters["optmessages"] = 15
585 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
586 if PlatformInfo.has_scipy:
587 import scipy.optimize
588 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
590 self._parameters["optmessages"] = 15
# _post_run: common epilogue — derive variances / standard deviations /
# correlations from each stored a posteriori covariance, then log counters.
594 def _post_run(self,_oH=None):
596 if ("StoreSupplementaryCalculations" in self._parameters) and \
597 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
598 for _A in self.StoredVariables["APosterioriCovariance"]:
599 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
600 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
601 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
602 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
603 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 * A * D^-1/2 with D = diag(A).
604 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
605 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
606 self.StoredVariables["APosterioriCorrelations"].store( _C )
608 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
609 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
610 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
611 logging.debug("%s Terminé", self._name)
614 def _toStore(self, key):
615 "True if in StoreSupplementaryCalculations, else False"
616 return key in self._parameters["StoreSupplementaryCalculations"]
618 def get(self, key=None):
620 Renvoie l'une des variables stockées identifiée par la clé, ou le
621 dictionnaire de l'ensemble des variables disponibles en l'absence de
622 clé. Ce sont directement les variables sous forme objet qui sont
623 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
624 des classes de persistance.
627 return self.StoredVariables[key]
629 return self.StoredVariables
631 def __contains__(self, key=None):
632 "D.__contains__(k) -> True if D has a key k, else False"
633 return key in self.StoredVariables
# keys() method (its "def" header is missing in this extraction).
636 "D.keys() -> list of D's keys"
637 if hasattr(self, "StoredVariables"):
638 return self.StoredVariables.keys()
# pop() method (its "def" header is missing in this extraction).
643 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
644 if hasattr(self, "StoredVariables"):
645 return self.StoredVariables.pop(k, d)
650 raise TypeError("pop expected at least 1 arguments, got 0")
651 "If key is not found, d is returned if given, otherwise KeyError is raised"
# run() must be overridden by every concrete algorithm subclass.
657 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
659 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
660 sa forme mathématique la plus naturelle possible.
662 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
664 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
666 Permet de définir dans l'algorithme des paramètres requis et leurs
667 caractéristiques par défaut.
670 raise ValueError("A name is mandatory to define a required parameter.")
672 self.__required_parameters[name] = {
674 "typecast" : typecast,
680 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
682 def getRequiredParameters(self, noDetails=True):
684 Renvoie la liste des noms de paramètres requis ou directement le
685 dictionnaire des paramètres requis.
688 return sorted(self.__required_parameters.keys())
690 return self.__required_parameters
# setParameterValue: typecast the given (or default) value, then validate it
# against the declared minval/maxval bounds and allowed-values list.
692 def setParameterValue(self, name=None, value=None):
694 Renvoie la valeur d'un paramètre requis de manière contrôlée
696 default = self.__required_parameters[name]["default"]
697 typecast = self.__required_parameters[name]["typecast"]
698 minval = self.__required_parameters[name]["minval"]
699 maxval = self.__required_parameters[name]["maxval"]
700 listval = self.__required_parameters[name]["listval"]
702 if value is None and default is None:
704 elif value is None and default is not None:
705 if typecast is None: __val = default
706 else: __val = typecast( default )
708 if typecast is None: __val = value
709 else: __val = typecast( value )
711 if minval is not None and (numpy.array(__val, float) < minval).any():
712 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
713 if maxval is not None and (numpy.array(__val, float) > maxval).any():
714 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
715 if listval is not None:
716 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
719 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
720 elif __val not in listval:
721 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
724 def requireInputArguments(self, mandatory=(), optional=()):
726 Permet d'imposer des arguments requises en entrée
728 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
729 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
731 def __setParameters(self, fromDico={}):
733 Permet de stocker les paramètres reçus dans le dictionnaire interne.
735 self._parameters.update( fromDico )
# Required parameters are validated one by one through setParameterValue.
736 for k in self.__required_parameters.keys():
737 if k in fromDico.keys():
738 self._parameters[k] = self.setParameterValue(k,fromDico[k])
740 self._parameters[k] = self.setParameterValue(k)
741 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface class driving a data assimilation algorithm and its
    parameters.

    NOTE(review): this copy of the file is incomplete: several original
    source lines are missing inside the methods below (truncated signatures
    and calls, unmatched try/except, absent else branches). Every visible
    gap is flagged with a "NOTE(review)" comment; restore the missing lines
    from the upstream ADAO "daCore" module before executing.
    """
    # NOTE(review): the "def __init__(self," header and the other keyword
    # arguments of the constructor (asAlgorithm, asDict, asScript, ...) are
    # missing from this copy.
                 name = "GenericAlgorithm",
        # Store the study name and reset the algorithm slots.
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # NOTE(review): the initialisations of self.__A and self.__P appear
        # to be missing from this copy — confirm against upstream.
        self.updateParameters( asDict, asScript )
        #
        # The algorithm can come from an external script or be given directly.
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else" branch assigning __Algo is missing here.
        #
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        #
        self.__setAlgorithm( self.__A )

    def updateParameters(self,
        # NOTE(review): the remaining keyword parameters of this signature
        # (presumably asDict and asScript with None defaults) are missing.
        "Updates the parameters (from a dictionary or an external script)"
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the "else" branch assigning __Dict is missing here.
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )

    def executePythonScheme(self, asDictAO = None):
        "Runs the assimilation computation (pure Python scheme)"
        Operator.CM.clearCache()
        #
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap each input: user-facing wrappers expose getO(), other
        # objects are taken as given.
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            # NOTE(review): the other keyword arguments of run() (the data
            # and covariance objects gathered just above) are missing from
            # this copy of the call, as is its closing parenthesis.
            Parameters = self.__P,

    def executeYACSScheme(self, FileName=None):
        "Runs the assimilation computation through a YACS scheme file"
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        # NOTE(review): an "else:" line is missing here.
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # All three environment components must be importable.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        #
        # NOTE(review): the imports of pilot, SALOMERuntime and loader are
        # missing from this copy.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        #
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # NOTE(review): a "try:" line is missing before this catalog load.
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        # NOTE(review): the matching "except"/"pass" and the "try:" guarding
        # the schema load are missing here.
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        #
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        #
        # NOTE(review): a validity test (presumably "if not p.isValid():")
        # is missing before these two statements — confirm against upstream.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
        #
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        #
        e = pilot.ExecutorSwig()
        # NOTE(review): the executor run call (presumably e.RunW(p)) is
        # missing here — confirm against upstream.
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())

    def get(self, key = None):
        "Checks for a variable or parameter key and returns its value"
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
            # NOTE(review): the body of this branch and the final default
            # return are missing from this copy.

    # NOTE(review): the "def pop(self, k, d):" header is missing here.
        "Required for pickling"
        return self.__algorithm.pop(k, d)

    def getAlgorithmRequiredParameters(self, noDetails=True):
        "Returns the list of parameters required by the algorithm"
        return self.__algorithm.getRequiredParameters(noDetails)

    def setObserver(self, __V, __O, __I, __S):
        # Attaches observer function __O (with info __I and scheduler __S)
        # to the stored variable named __V of the current algorithm.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        # NOTE(review): an "else:" line is missing here.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            # NOTE(review): the other keyword arguments of this call and its
            # closing parenthesis are missing from this copy.
            HookParameters = __I,

    def removeObserver(self, __V, __O, __A = False):
        # Detaches observer __O from variable __V (__A: drop all observers).
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        # NOTE(review): an "else:" line is missing here.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            # NOTE(review): the arguments of this call and its closing
            # parenthesis are missing from this copy.

    def hasObserver(self, __V):
        # Tells whether variable __V currently has a data observer attached.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            # NOTE(review): the "return False" of this branch is missing.
        if __V not in self.__algorithm:
            # NOTE(review): the "return False" of this branch is missing.
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()

    # NOTE(review): the "def keys(self):" header is missing here.
        return list(self.__algorithm.keys()) + list(self.__P.keys())

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.__algorithm or key in self.__P

    # NOTE(review): the "def __repr__(self):" header is missing here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)

    # NOTE(review): the "def __str__(self):" header is missing here.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)

    def __setAlgorithm(self, choice = None ):
        """
        Selects the algorithm used to carry out the assimilation study.
        The argument is a character string naming an algorithm that
        performs the operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" test is missing before
        # this raise.
            raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly look for the complete module file in sys.path
        # ------------------------------------------
        # NOTE(review): a "module_path = None" initialisation is missing
        # before this loop — confirm against upstream.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------------
        # NOTE(review): the "try:" matching the "except" below is missing.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the elementary type defined in the file
        # -------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()

    def __shape_validate(self):
        """
        Validates the mutual consistency of the sizes of the variables and
        of the matrices, when there are any.
        """
        # Normalise every operand into a shape tuple; absent operands get
        # conventional empty shapes so the checks below stay uniform.
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check the consistency conditions
        # ---------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # EnsembleBlue accepts a background given as an ensemble: it is
            # repacked member by member into a persistent vector.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float is a deprecated alias that
                    # was removed in NumPy 1.20 — upstream should use float.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
            # NOTE(review): an "else:" line is missing before this raise.
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
        # NOTE(review): the method's final lines (presumably "return 1")
        # are missing from this copy.
# ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for the regulation and its parameters.

    NOTE(review): this copy is incomplete; every visible gap is flagged
    with a "NOTE(review)" comment — restore from upstream before executing.
    """
    # NOTE(review): the "def __init__(self," header and the other keyword
    # arguments (asAlgorithm, asDict, asScript, ...) are missing here.
                 name = "GenericRegulation",
        self.__name = str(name)
        # NOTE(review): the initialisations of self.__P and self.__A are
        # missing from this copy.
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): an "else:" line is missing here.
            __Algo = asAlgorithm
        #
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the "else:" branch assigning __Dict is missing here.
        #
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":self.__A} )

    def get(self, key = None):
        "Checks a variable or parameter key and returns its value"
        # NOTE(review): the default/"key is None" handling branch of this
        # method is missing from this copy.
        return self.__P[key]
# ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class: binds a user-supplied function
    to one or several stored variables of an algorithm.

    NOTE(review): this copy is incomplete; every visible gap is flagged
    with a "NOTE(review)" comment — restore from upstream before executing.
    """
    # NOTE(review): the "def __init__(self," header and the other keyword
    # arguments (onVariable, asTemplate, asString, asScript, asObsObject,
    # withInfo, scheduledBy, withAlgo, ...) are missing from this copy.
                 name = "GenericObserver",
        self.__name = str(name)
        # NOTE(review): further initialisations of this constructor are
        # missing from this copy.
        # Normalise the observed variable name(s) into a tuple self.__V
        # with a parallel tuple of info strings self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
            # NOTE(review): the body of this branch and its "else:" line
            # are missing from this copy.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): an "else:" line is missing here.
                self.__I = (str(withInfo),)
        # NOTE(review): an "else:" line is missing before this raise.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # The observer body can come as raw text, a named template, or a
        # script file; it is then compiled into a callable.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        # NOTE(review): the final "else:" branch of this chain is missing.
        __Function = ObserverF(__FunctionText)
        #
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): an "else:" line is missing here.
            self.__O = __Function.getfunc()
        #
        for k in range(len(self.__V)):
            # NOTE(review): the lines unpacking ename/einfo from self.__V
            # and self.__I are missing from this copy.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): an "else:" line is missing here.
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): the "def __repr__(self):" header is missing here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    # NOTE(review): the "def __str__(self):" header is missing here.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
# ==============================================================================
class State(object):
    """
    General state-type interface class.

    NOTE(review): this copy is incomplete; every visible gap is flagged
    with a "NOTE(review)" comment — restore from upstream before executing.
    """
    # NOTE(review): the "def __init__(self," header and the other keyword
    # arguments (asVector, asScript, asDataFile, colNames, colMajor,
    # scheduledBy, ...) are missing from this copy.
                 name = "GenericVector",
                 asPersistentVector = None,
                 toBeChecked = False,
        """
        Defines a vector:
        - asVector : data input, as a vector compatible with the
          numpy.matrix constructor, or "True" for script input.
        - asPersistentVector : data input, as a series of vectors
          compatible with the numpy.matrix constructor, as a Persistence
          object, or "True" for script input.
        - asScript : if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of these flags is set.
        - asDataFile : if one or several valid files are given containing
          values in columns, themselves named "colNames" (if no column name
          is given, a column named "name" is searched), the columns are
          retrieved and stored row after row (colMajor=False) or column
          after column (colMajor=True). The resulting variable is of type
          "asVector" (default) or "asPersistentVector" depending on which
          of these flags is set.
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): the initialisations of self.__V, self.__T and the
        # shape/size attributes are missing from this copy.
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "else:" line is missing here.
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is missing here.
                    __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # The npz container stores data transposed with respect to
                # the text formats, hence the double test on the format.
                if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            # NOTE(review): an "else:" line is missing here.
                if colNames is not None:
                    __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is missing here.
                    __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # NOTE(review): an "if bool(colMajor):" test is missing here.
                    __Vector = numpy.ravel(__Vector, order = "F")
                # NOTE(review): an "else:" line is missing here.
                    __Vector = numpy.ravel(__Vector, order = "C")
        # NOTE(review): an "else:" line is missing here.
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): numpy.float is a deprecated alias that was
            # removed in NumPy 1.20 — upstream should use float here.
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval() on a string input is unsafe for
                # untrusted data — flagged for upstream review.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            # NOTE(review): the "else:" branch storing __Series directly is
            # missing from this copy.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): an "else:" line is missing here.
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): an "else:" line is missing before this raise.
            raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        # Returns the stored object, optionally together with its scheduler.
        # NOTE(review): the "if withScheduler:" test is missing here.
            return self.__V, self.__T
        elif self.__T is None:
            # NOTE(review): the remaining branches of this method are
            # missing from this copy.

    # NOTE(review): the "def isvector(self):" header is missing here.
        "Internal type check"
        return self.__is_vector

    # NOTE(review): the "def isseries(self):" header is missing here.
        "Internal type check"
        return self.__is_series

    # NOTE(review): the "def __repr__(self):" header is missing here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    # NOTE(review): the "def __str__(self):" header is missing here.
        "x.__str__() <==> str(x)"
        return str(self.__V)
# ==============================================================================
class Covariance(object):
    """
    General covariance-type interface class. The matrix is held in one of
    four internal representations: full matrix, diagonal variance vector,
    scalar multiple of the identity, or a user object providing the matrix
    protocol.

    NOTE(review): this copy is incomplete; every visible gap is flagged
    with a "NOTE(review)" comment — restore from upstream before executing.
    """
    # NOTE(review): the "def __init__(self," header and the asCovObject /
    # asScript keyword arguments are missing from this copy.
                 name = "GenericCovariance",
                 asCovariance = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked = False,
        """
        Defines a covariance:
        - asCovariance : data input as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar : data input as a single variance scalar,
          multiplying an identity correlation matrix, so no matrix has to
          be given explicitly
        - asEyeByVector : data input as a single variance vector, to be put
          on the diagonal of a correlation matrix, so no matrix has to be
          given explicitly
        - asCovObject : data input as a python object, which has the
          mandatory methods "getT", "getI", "diag", "trace", "__add__",
          "__sub__", "__neg__", "__mul__", "__rmul__" and the optional ones
          "shape", "size", "cholesky", "choleskyI", "asfullmatrix",
          "__repr__", "__str__"
        - toBeChecked : boolean stating whether the SPD character of the
          full matrix has to be checked
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): the initialisation of self.__C is missing here.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            # NOTE(review): an "if asEyeByScalar:" test is missing here.
                __Scalar = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "elif asEyeByVector:" test is missing here.
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "elif asCovObject:" test is missing here.
                __Object = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "else:" line is missing here.
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): an "else:" line is missing here.
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        # Select the internal representation from whichever input is given.
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # NOTE(review): the shape/size assignments of this branch are
            # missing from this copy.
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # NOTE(review): the "self.__C = __Object" assignment appears to
            # be missing from this copy — confirm against upstream.
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            # NOTE(review): the "else:" branch of this test is missing.
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            # NOTE(review): the "else:" branch of this test and the final
            # "else:" of the whole chain are missing from this copy.
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
        # NOTE(review): the closing call to self.__validate() appears to be
        # missing from this copy — confirm against upstream.

    def __validate(self):
        # Validity and SPD checks on the stored covariance, for all four
        # internal representations.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # The expensive Cholesky SPD check is only run on demand or when
        # the logging level asks for detailed checking.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): a "try:" line is missing here.
            L = numpy.linalg.cholesky( self.__C )
            # NOTE(review): the matching "except" line is missing here.
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): a "try:" line is missing here.
            L = self.__C.cholesky()
            # NOTE(review): the matching "except" line is missing here.
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

    # NOTE(review): the "def isscalar(self):" header is missing here.
        "Internal type check"
        return self.__is_scalar

    # NOTE(review): the "def isvector(self):" header is missing here.
        "Internal type check"
        return self.__is_vector

    # NOTE(review): the "def ismatrix(self):" header is missing here.
        "Internal type check"
        return self.__is_matrix

    # NOTE(review): the "def isobject(self):" header is missing here.
        "Internal type check"
        return self.__is_object

    # NOTE(review): the "def getI(self):" header, its docstring and the
    # "if self.ismatrix():" test are missing here.
        return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        # NOTE(review): an "else:" line is missing here.
            return None # Indispensable

    # NOTE(review): the "def getT(self):" header, its docstring and the
    # "if self.ismatrix():" test are missing here.
        return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )

    # NOTE(review): the "def cholesky(self):" header is missing here.
        "Cholesky decomposition"
        # NOTE(review): the "if self.ismatrix():" test is missing here.
        return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )

    def choleskyI(self):
        "Inversion of the Cholesky decomposition"
        # NOTE(review): the "if self.ismatrix():" test is missing here.
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )

    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the "if self.ismatrix():" test is missing here.
        return numpy.diag(self.__C)
        elif self.isvector():
            # NOTE(review): the "return self.__C" of this branch is missing.
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" test is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is missing here.
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()

    def asfullmatrix(self, msize=None):
        # Returns the covariance as an explicit full numpy matrix.
        # NOTE(review): the docstring, the "if self.ismatrix():" test and
        # its return are missing from this copy.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" test is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is missing here.
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()

    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the "if self.ismatrix():" test is missing here.
        return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # NOTE(review): the "if msize is None:" test is missing here.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is missing here.
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()

    # NOTE(review): the "def __repr__(self):" header is missing here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

    # NOTE(review): the "def __str__(self):" header is missing here.
        "x.__str__() <==> str(x)"
        return str(self.__C)
1569 def __add__(self, other):
1570 "x.__add__(y) <==> x+y"
1571 if self.ismatrix() or self.isobject():
1572 return self.__C + numpy.asmatrix(other)
1573 elif self.isvector() or self.isscalar():
1574 _A = numpy.asarray(other)
1575 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1576 return numpy.asmatrix(_A)
    def __radd__(self, other):
        "x.__radd__(y) <==> y+x (deliberately unsupported for this type)"
        raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1582 def __sub__(self, other):
1583 "x.__sub__(y) <==> x-y"
1584 if self.ismatrix() or self.isobject():
1585 return self.__C - numpy.asmatrix(other)
1586 elif self.isvector() or self.isscalar():
1587 _A = numpy.asarray(other)
1588 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1589 return numpy.asmatrix(_A)
    def __rsub__(self, other):
        "x.__rsub__(y) <==> y-x (deliberately unsupported for this type)"
        raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))

    # NOTE(review): the "def __neg__(self):" header is missing here.
        "x.__neg__() <==> -x"
        # NOTE(review): the return statement of this method is missing.

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
            # NOTE(review): an "else:" line is missing before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            # NOTE(review): an "else:" line is missing before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            # NOTE(review): an "else:" line is missing here.
            return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        # NOTE(review): an "else:" line is missing before this raise.
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))

    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            # NOTE(review): an "else:" line is missing before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        # NOTE(review): an "else:" line is missing before this raise.
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))

    # NOTE(review): the "def __len__(self):" header is missing here.
        "x.__len__() <==> len(x)"
        return self.shape[0]
# ==============================================================================
class ObserverF(object):
    """
    Builds an observer function from its source text.
    """
    def __init__(self, corps=""):
        # Keep the raw function body text for later compilation.
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        # NOTE(review): the body of this method (presumably an exec of
        # self.__corps) is missing from this copy — confirm upstream.
    # NOTE(review): the "def getfunc(self):" header is missing here.
        "Returns the function pointer held by the object"
        # NOTE(review): the return statement of this method is missing.
# ==============================================================================
class CaseLogger(object):
    """
    Keeps a record of the commands used to create a case, and can
    serialise (dump) or reload (load) them through pluggable formatters.

    NOTE(review): this copy is incomplete; every visible gap is flagged
    with a "NOTE(review)" comment — restore from upstream before executing.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []            # ordered list of recorded commands
        self.__switchoff = False        # when True, register() records nothing
        # NOTE(review): the "self.__viewers = {" opening line is missing.
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
        # NOTE(review): the closing "}" of the viewers dict and the
        # "self.__loaders = {" opening line are missing here.
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        # NOTE(review): the closing "}" of the loaders dict is missing.
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Registers a single command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # NOTE(review): a conditional (presumably "if __switchoff:")
            # is missing before this statement — confirm upstream.
            self.__switchoff = True
        # NOTE(review): a conditional (presumably "if not __switchoff:")
        # is missing before this statement — confirm upstream.
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalised serialisation of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        # NOTE(review): an "else:" line is missing before this raise.
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalised loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        # NOTE(review): an "else:" line is missing before this raise.
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1715 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x): HO["Direct"].appliedTo
                   _HmX = None,   # Simulation of Hm(x) already computed
                   _arg = None,   # Extra arguments for Hm, given as a tuple
                   _BI  = None,   # Inverse background error covariance (used as B^-1 weight)
                   _RI  = None,   # Inverse observation error covariance (used as R^-1 weight)
                   _Xb  = None,   # Background state
                   _Y   = None,   # Observation vector
                   _SIV = False,  # To be phased out for 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"] (default: [])
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # Input and/or output: self.StoredVariables (default: {})
                   _fRt = False,  # Whether to return the extended output
                   _sSc = True,   # Whether to store the SSC
                  ):
    """
    Generic cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J, except when _fRt is True (returns the _SSV
    storage dict) or _QM is "QR" (returns the simulated observation _HX).
    Raises ValueError if the operator or a required covariance is missing,
    or if the quality measure is unknown.
    """
    # Bug fix: _SSC/_SSV previously used mutable default arguments ([], {});
    # since _SSV is mutated below, the shared default dict leaked stored
    # cost values across calls (corrupting IndexOfOptimum in particular).
    if _SSC is None:
        _SSC = []
    if _SSV is None:
        _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every storage slot exists and exposes an "append" method
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # Use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Simulated observation: reuse a precomputed value when provided
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation yields an undefined cost
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    # Track the optimum over the stored cost history when requested
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    if _QM in ["QR"]: # For QuantileRegression
        return _HX
    return J
1830 # ==============================================================================
if __name__ == "__main__":
    # Print a simple self-check banner when the module is run directly
    print('\n AUTODIAGNOSTIC \n')