1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from daCore import Persistence
35 from daCore import PlatformInfo
36 from daCore import Interfaces
37 from daCore import Templates
38 from daCore.Interfaces import ImportFromScript, ImportFromFile
40 # ==============================================================================
41 class CacheManager(object):
43 Classe générale de gestion d'un cache de calculs
46 toleranceInRedundancy = 1.e-18,
47 lenghtOfRedundancy = -1,
50 Les caractéristiques de tolérance peuvent être modifées à la création.
52 self.__tolerBP = float(toleranceInRedundancy)
53 self.__lenghtOR = int(lenghtOfRedundancy)
54 self.__initlnOR = self.__lenghtOR
59 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
60 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
62 def wasCalculatedIn(self, xValue ): #, info="" ):
63 "Vérifie l'existence d'un calcul correspondant à la valeur"
66 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
67 if xValue.size != self.__listOPCV[i][0].size:
68 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
70 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
72 __HxV = self.__listOPCV[i][1]
73 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
77 def storeValueInX(self, xValue, HxValue ):
78 "Stocke un calcul correspondant à la valeur"
79 if self.__lenghtOR < 0:
80 self.__lenghtOR = 2 * xValue.size + 2
81 self.__initlnOR = self.__lenghtOR
82 while len(self.__listOPCV) > self.__lenghtOR:
83 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
84 self.__listOPCV.pop(0)
85 self.__listOPCV.append( (
86 copy.copy(numpy.ravel(xValue)),
88 numpy.linalg.norm(xValue),
93 self.__initlnOR = self.__lenghtOR
98 self.__lenghtOR = self.__initlnOR
100 # ==============================================================================
101 class Operator(object):
103 Classe générale d'interface de type opérateur simple
110 def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
112 On construit un objet de ce type en fournissant à l'aide de l'un des
113 deux mots-clé, soit une fonction python, soit une matrice.
115 - fromMethod : argument de type fonction Python
116 - fromMatrix : argument adapté au constructeur numpy.matrix
117 - avoidingRedundancy : évite ou pas les calculs redondants
119 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
120 self.__AvoidRC = bool( avoidingRedundancy )
121 if fromMethod is not None:
122 self.__Method = fromMethod # logtimer(fromMethod)
124 self.__Type = "Method"
125 elif fromMatrix is not None:
127 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
128 self.__Type = "Matrix"
    def disableAvoidingRedundancy(self):
        "Turn off the class-wide calculation cache (Operator.CM), i.e. stop avoiding redundant evaluations."
        Operator.CM.disable()
138 def enableAvoidingRedundancy(self):
143 Operator.CM.disable()
149 def appliedTo(self, xValue, HValue = None):
151 Permet de restituer le résultat de l'application de l'opérateur à un
152 argument xValue. Cette méthode se contente d'appliquer, son argument
153 devant a priori être du bon type.
155 - xValue : argument adapté pour appliquer l'opérateur
157 if HValue is not None:
158 HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
160 Operator.CM.storeValueInX(xValue,HxValue)
163 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
165 __alreadyCalculated = False
167 if __alreadyCalculated:
168 self.__addOneCacheCall()
171 if self.__Matrix is not None:
172 self.__addOneMatrixCall()
173 HxValue = self.__Matrix * xValue
175 self.__addOneMethodCall()
176 HxValue = self.__Method( xValue )
178 Operator.CM.storeValueInX(xValue,HxValue)
182 def appliedControledFormTo(self, paire ):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 paire (xValue, uValue). Cette méthode se contente d'appliquer, son
186 argument devant a priori être du bon type. Si la uValue est None,
187 on suppose que l'opérateur ne s'applique qu'à xValue.
189 - xValue : argument X adapté pour appliquer l'opérateur
190 - uValue : argument U adapté pour appliquer l'opérateur
192 assert len(paire) == 2, "Incorrect number of arguments"
193 xValue, uValue = paire
194 if self.__Matrix is not None:
195 self.__addOneMatrixCall()
196 return self.__Matrix * xValue
197 elif uValue is not None:
198 self.__addOneMethodCall()
199 return self.__Method( (xValue, uValue) )
201 self.__addOneMethodCall()
202 return self.__Method( xValue )
204 def appliedInXTo(self, paire ):
206 Permet de restituer le résultat de l'application de l'opérateur à un
207 argument xValue, sachant que l'opérateur est valable en xNominal.
208 Cette méthode se contente d'appliquer, son argument devant a priori
209 être du bon type. Si l'opérateur est linéaire car c'est une matrice,
210 alors il est valable en tout point nominal et il n'est pas nécessaire
212 Arguments : une liste contenant
213 - xNominal : argument permettant de donner le point où l'opérateur
214 est construit pour etre ensuite appliqué
215 - xValue : argument adapté pour appliquer l'opérateur
217 assert len(paire) == 2, "Incorrect number of arguments"
218 xNominal, xValue = paire
219 if self.__Matrix is not None:
220 self.__addOneMatrixCall()
221 return self.__Matrix * xValue
223 self.__addOneMethodCall()
224 return self.__Method( (xNominal, xValue) )
226 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
228 Permet de renvoyer l'opérateur sous la forme d'une matrice
230 if self.__Matrix is not None:
231 self.__addOneMatrixCall()
233 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
234 self.__addOneMethodCall()
235 return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
237 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
241 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
242 la forme d'une matrice
244 if self.__Matrix is not None:
245 return self.__Matrix.shape
247 raise ValueError("Matrix form of the operator is not available, nor the shape")
249 def nbcalls(self, which=None):
251 Renvoie les nombres d'évaluations de l'opérateur
254 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
255 self.__NbCallsAsMatrix,
256 self.__NbCallsAsMethod,
257 self.__NbCallsOfCached,
258 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
259 Operator.NbCallsAsMatrix,
260 Operator.NbCallsAsMethod,
261 Operator.NbCallsOfCached,
263 if which is None: return __nbcalls
264 else: return __nbcalls[which]
266 def __addOneMatrixCall(self):
267 "Comptabilise un appel"
268 self.__NbCallsAsMatrix += 1 # Decompte local
269 Operator.NbCallsAsMatrix += 1 # Decompte global
271 def __addOneMethodCall(self):
272 "Comptabilise un appel"
273 self.__NbCallsAsMethod += 1 # Decompte local
274 Operator.NbCallsAsMethod += 1 # Decompte global
276 def __addOneCacheCall(self):
277 "Comptabilise un appel"
278 self.__NbCallsOfCached += 1 # Decompte local
279 Operator.NbCallsOfCached += 1 # Decompte global
281 # ==============================================================================
282 class FullOperator(object):
284 Classe générale d'interface de type opérateur complet
285 (Direct, Linéaire Tangent, Adjoint)
288 name = "GenericFullOperator",
290 asOneFunction = None, # Fonction
291 asThreeFunctions = None, # Fonctions dictionary
292 asScript = None, # Fonction(s) script
293 asDict = None, # Parameters
300 self.__name = str(name)
301 self.__check = bool(toBeChecked)
306 if (asDict is not None) and isinstance(asDict, dict):
307 __Parameters.update( asDict )
308 if "DifferentialIncrement" in asDict:
309 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
310 if "CenteredFiniteDifference" in asDict:
311 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
312 if "EnableMultiProcessing" in asDict:
313 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
314 if "NumberOfProcesses" in asDict:
315 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
317 if asScript is not None:
318 __Matrix, __Function = None, None
320 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
322 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
323 __Function.update({"useApproximatedDerivatives":True})
324 __Function.update(__Parameters)
325 elif asThreeFunctions:
327 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
328 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
329 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
331 __Function.update(__Parameters)
334 if asOneFunction is not None:
335 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
336 if asOneFunction["Direct"] is not None:
337 __Function = asOneFunction
339 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
341 __Function = { "Direct":asOneFunction }
342 __Function.update({"useApproximatedDerivatives":True})
343 __Function.update(__Parameters)
344 elif asThreeFunctions is not None:
345 if isinstance(asThreeFunctions, dict) and \
346 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
347 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
348 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
349 __Function = asThreeFunctions
350 elif isinstance(asThreeFunctions, dict) and \
351 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
352 __Function = asThreeFunctions
353 __Function.update({"useApproximatedDerivatives":True})
355 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
356 if "Direct" not in asThreeFunctions:
357 __Function["Direct"] = asThreeFunctions["Tangent"]
358 __Function.update(__Parameters)
362 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
363 # for k in ("Direct", "Tangent", "Adjoint"):
364 # if k in __Function and hasattr(__Function[k],"__class__"):
365 # if type(__Function[k]) is type(self.__init__):
366 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
368 if appliedInX is not None and isinstance(appliedInX, dict):
369 __appliedInX = appliedInX
370 elif appliedInX is not None:
371 __appliedInX = {"HXb":appliedInX}
375 if scheduledBy is not None:
376 self.__T = scheduledBy
378 if isinstance(__Function, dict) and \
379 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
380 ("Direct" in __Function) and (__Function["Direct"] is not None):
381 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
382 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
383 if "withdX" not in __Function: __Function["withdX"] = None
384 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
385 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
386 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
387 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
388 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
389 from daNumerics.ApproximatedDerivatives import FDApproximation
390 FDA = FDApproximation(
391 Function = __Function["Direct"],
392 centeredDF = __Function["withCenteredDF"],
393 increment = __Function["withIncrement"],
394 dX = __Function["withdX"],
395 avoidingRedundancy = __Function["withAvoidingRedundancy"],
396 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
397 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
398 mpEnabled = __Function["withmpEnabled"],
399 mpWorkers = __Function["withmpWorkers"],
401 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC )
402 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
403 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
404 elif isinstance(__Function, dict) and \
405 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
406 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
407 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC )
408 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
409 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
410 elif asMatrix is not None:
411 __matrice = numpy.matrix( __Matrix, numpy.float )
412 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
413 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
414 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
417 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
419 if __appliedInX is not None:
420 self.__FO["AppliedInX"] = {}
421 for key in list(__appliedInX.keys()):
422 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
423 # Pour le cas où l'on a une vraie matrice
424 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
425 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
426 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
427 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
429 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
431 self.__FO["AppliedInX"] = None
437 "x.__repr__() <==> repr(x)"
438 return repr(self.__V)
441 "x.__str__() <==> str(x)"
444 # ==============================================================================
445 class Algorithm(object):
447 Classe générale d'interface de type algorithme
449 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
450 d'assimilation, en fournissant un container (dictionnaire) de variables
451 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
453 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
455 def __init__(self, name):
457 L'initialisation présente permet de fabriquer des variables de stockage
458 disponibles de manière générique dans les algorithmes élémentaires. Ces
459 variables de stockage sont ensuite conservées dans un dictionnaire
460 interne à l'objet, mais auquel on accède par la méthode "get".
462 Les variables prévues sont :
463 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
464 - CostFunctionJb : partie ébauche ou background de la fonction-cout
465 - CostFunctionJo : partie observations de la fonction-cout
466 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
467 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
468 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
469 - CurrentState : état courant lors d'itérations
470 - Analysis : l'analyse Xa
471 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
472 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
473 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
474 - Innovation : l'innovation : d = Y - H(X)
475 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
476 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
477 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
478 - MahalanobisConsistency : indicateur de consistance des covariances
479 - OMA : Observation moins Analysis : Y - Xa
480 - OMB : Observation moins Background : Y - Xb
481 - AMB : Analysis moins Background : Xa - Xb
482 - APosterioriCovariance : matrice A
483 - APosterioriVariances : variances de la matrice A
484 - APosterioriStandardDeviations : écart-types de la matrice A
485 - APosterioriCorrelations : correlations de la matrice A
486 - Residu : dans le cas des algorithmes de vérification
487 On peut rajouter des variables à stocker dans l'initialisation de
488 l'algorithme élémentaire qui va hériter de cette classe
490 logging.debug("%s Initialisation", str(name))
491 self._m = PlatformInfo.SystemUsage()
493 self._name = str( name )
494 self._parameters = {"StoreSupplementaryCalculations":[]}
495 self.__required_parameters = {}
496 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
498 self.StoredVariables = {}
499 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
500 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
501 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
502 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
503 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
504 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
505 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
506 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
507 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
508 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
509 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
510 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
511 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
512 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
513 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
514 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
515 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
516 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
517 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
518 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
519 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
520 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
521 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
522 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
523 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
524 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
525 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
526 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
527 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
528 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
529 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
530 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
532 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
534 logging.debug("%s Lancement", self._name)
535 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
537 # Mise a jour de self._parameters avec Parameters
538 self.__setParameters(Parameters)
540 # Corrections et complements
541 def __test_vvalue(argument, variable, argname):
543 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
544 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
545 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
546 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
548 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
550 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
552 __test_vvalue( Xb, "Xb", "Background or initial state" )
553 __test_vvalue( Y, "Y", "Observation" )
555 def __test_cvalue(argument, variable, argname):
557 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
558 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
559 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
560 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
562 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
564 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
566 __test_cvalue( R, "R", "Observation" )
567 __test_cvalue( B, "B", "Background" )
568 __test_cvalue( Q, "Q", "Evolution" )
570 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
571 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
573 self._parameters["Bounds"] = None
575 if logging.getLogger().level < logging.WARNING:
576 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
577 if PlatformInfo.has_scipy:
578 import scipy.optimize
579 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
581 self._parameters["optmessages"] = 15
583 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
584 if PlatformInfo.has_scipy:
585 import scipy.optimize
586 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
588 self._parameters["optmessages"] = 15
592 def _post_run(self,_oH=None):
594 if ("StoreSupplementaryCalculations" in self._parameters) and \
595 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
596 for _A in self.StoredVariables["APosterioriCovariance"]:
597 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
598 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
599 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
600 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
601 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
602 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
603 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
604 self.StoredVariables["APosterioriCorrelations"].store( _C )
606 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
607 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
608 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
609 logging.debug("%s Terminé", self._name)
612 def _toStore(self, key):
613 "True if in StoreSupplementaryCalculations, else False"
614 return key in self._parameters["StoreSupplementaryCalculations"]
616 def get(self, key=None):
618 Renvoie l'une des variables stockées identifiée par la clé, ou le
619 dictionnaire de l'ensemble des variables disponibles en l'absence de
620 clé. Ce sont directement les variables sous forme objet qui sont
621 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
622 des classes de persistance.
625 return self.StoredVariables[key]
627 return self.StoredVariables
    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        # Membership is delegated to the internal dictionary of stored variables.
        return key in self.StoredVariables
634 "D.keys() -> list of D's keys"
635 if hasattr(self, "StoredVariables"):
636 return self.StoredVariables.keys()
641 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
642 if hasattr(self, "StoredVariables"):
643 return self.StoredVariables.pop(k, d)
648 raise TypeError("pop expected at least 1 arguments, got 0")
649 "If key is not found, d is returned if given, otherwise KeyError is raised"
655 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
657 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
658 sa forme mathématique la plus naturelle possible.
660 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
662 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
664 Permet de définir dans l'algorithme des paramètres requis et leurs
665 caractéristiques par défaut.
668 raise ValueError("A name is mandatory to define a required parameter.")
670 self.__required_parameters[name] = {
672 "typecast" : typecast,
678 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
680 def getRequiredParameters(self, noDetails=True):
682 Renvoie la liste des noms de paramètres requis ou directement le
683 dictionnaire des paramètres requis.
686 return sorted(self.__required_parameters.keys())
688 return self.__required_parameters
690 def setParameterValue(self, name=None, value=None):
692 Renvoie la valeur d'un paramètre requis de manière contrôlée
694 default = self.__required_parameters[name]["default"]
695 typecast = self.__required_parameters[name]["typecast"]
696 minval = self.__required_parameters[name]["minval"]
697 maxval = self.__required_parameters[name]["maxval"]
698 listval = self.__required_parameters[name]["listval"]
700 if value is None and default is None:
702 elif value is None and default is not None:
703 if typecast is None: __val = default
704 else: __val = typecast( default )
706 if typecast is None: __val = value
707 else: __val = typecast( value )
709 if minval is not None and (numpy.array(__val, float) < minval).any():
710 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
711 if maxval is not None and (numpy.array(__val, float) > maxval).any():
712 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
713 if listval is not None:
714 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
717 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
718 elif __val not in listval:
719 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
722 def requireInputArguments(self, mandatory=(), optional=()):
724 Permet d'imposer des arguments requises en entrée
726 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
727 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
729 def __setParameters(self, fromDico={}):
731 Permet de stocker les paramètres reçus dans le dictionnaire interne.
733 self._parameters.update( fromDico )
734 for k in self.__required_parameters.keys():
735 if k in fromDico.keys():
736 self._parameters[k] = self.setParameterValue(k,fromDico[k])
738 self._parameters[k] = self.setParameterValue(k)
739 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
741 # ==============================================================================
742 class AlgorithmAndParameters(object):
744 Classe générale d'interface d'action pour l'algorithme et ses paramètres
747 name = "GenericAlgorithm",
754 self.__name = str(name)
758 self.__algorithm = {}
759 self.__algorithmFile = None
760 self.__algorithmName = None
762 self.updateParameters( asDict, asScript )
764 if asAlgorithm is None and asScript is not None:
765 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
769 if __Algo is not None:
770 self.__A = str(__Algo)
771 self.__P.update( {"Algorithm":self.__A} )
773 self.__setAlgorithm( self.__A )
775 def updateParameters(self,
779 "Mise a jour des parametres"
780 if asDict is None and asScript is not None:
781 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
785 if __Dict is not None:
786 self.__P.update( dict(__Dict) )
788 def executePythonScheme(self, asDictAO = None):
789 "Permet de lancer le calcul d'assimilation"
790 Operator.CM.clearCache()
792 if not isinstance(asDictAO, dict):
793 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
794 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
795 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
796 else: self.__Xb = None
797 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
798 else: self.__Y = asDictAO["Observation"]
799 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
800 else: self.__U = asDictAO["ControlInput"]
801 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
802 else: self.__HO = asDictAO["ObservationOperator"]
803 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
804 else: self.__EM = asDictAO["EvolutionModel"]
805 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
806 else: self.__CM = asDictAO["ControlModel"]
807 self.__B = asDictAO["BackgroundError"]
808 self.__R = asDictAO["ObservationError"]
809 self.__Q = asDictAO["EvolutionError"]
811 self.__shape_validate()
813 self.__algorithm.run(
823 Parameters = self.__P,
827 def executeYACSScheme(self, FileName=None):
828 "Permet de lancer le calcul d'assimilation"
829 if FileName is None or not os.path.exists(FileName):
830 raise ValueError("a YACS file name has to be given for YACS execution.\n")
832 __file = os.path.abspath(FileName)
833 logging.debug("The YACS file name is \"%s\"."%__file)
834 if not PlatformInfo.has_salome or \
835 not PlatformInfo.has_yacs or \
836 not PlatformInfo.has_adao:
837 raise ImportError("\n\n"+\
838 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
839 "Please load the right environnement before trying to use it.\n")
844 SALOMERuntime.RuntimeSALOME_setRuntime()
846 r = pilot.getRuntime()
847 xmlLoader = loader.YACSLoader()
848 xmlLoader.registerProcCataLoader()
850 catalogAd = r.loadCatalog("proc", __file)
851 r.addCatalog(catalogAd)
856 p = xmlLoader.load(__file)
857 except IOError as ex:
858 print("The YACS XML schema file can not be loaded: %s"%(ex,))
860 logger = p.getLogger("parser")
861 if not logger.isEmpty():
862 print("The imported YACS XML schema has errors on parsing:")
863 print(logger.getStr())
866 print("The YACS XML schema is not valid and will not be executed:")
867 print(p.getErrorReport())
869 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
870 p.checkConsistency(info)
871 if info.areWarningsOrErrors():
872 print("The YACS XML schema is not coherent and will not be executed:")
873 print(info.getGlobalRepr())
875 e = pilot.ExecutorSwig()
877 if p.getEffectiveState() != pilot.DONE:
878 print(p.getErrorReport())
882 def get(self, key = None):
883 "Vérifie l'existence d'une clé de variable ou de paramètres"
884 if key in self.__algorithm:
885 return self.__algorithm.get( key )
886 elif key in self.__P:
892 "Necessaire pour le pickling"
893 return self.__algorithm.pop(k, d)
895 def getAlgorithmRequiredParameters(self, noDetails=True):
896 "Renvoie la liste des paramètres requis selon l'algorithme"
897 return self.__algorithm.getRequiredParameters(noDetails)
899 def setObserver(self, __V, __O, __I, __S):
900 if self.__algorithm is None \
901 or isinstance(self.__algorithm, dict) \
902 or not hasattr(self.__algorithm,"StoredVariables"):
903 raise ValueError("No observer can be build before choosing an algorithm.")
904 if __V not in self.__algorithm:
905 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
907 self.__algorithm.StoredVariables[ __V ].setDataObserver(
910 HookParameters = __I,
913 def removeObserver(self, __V, __O, __A = False):
914 if self.__algorithm is None \
915 or isinstance(self.__algorithm, dict) \
916 or not hasattr(self.__algorithm,"StoredVariables"):
917 raise ValueError("No observer can be removed before choosing an algorithm.")
918 if __V not in self.__algorithm:
919 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
921 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
926 def hasObserver(self, __V):
927 if self.__algorithm is None \
928 or isinstance(self.__algorithm, dict) \
929 or not hasattr(self.__algorithm,"StoredVariables"):
931 if __V not in self.__algorithm:
933 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
936 return list(self.__algorithm.keys()) + list(self.__P.keys())
938 def __contains__(self, key=None):
939 "D.__contains__(k) -> True if D has a key k, else False"
940 return key in self.__algorithm or key in self.__P
943 "x.__repr__() <==> repr(x)"
944 return repr(self.__A)+", "+repr(self.__P)
947 "x.__str__() <==> str(x)"
948 return str(self.__A)+", "+str(self.__P)
950 def __setAlgorithm(self, choice = None ):
952 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
953 d'assimilation. L'argument est un champ caractère se rapportant au nom
954 d'un algorithme réalisant l'opération sur les arguments fixes.
957 raise ValueError("Error: algorithm choice has to be given")
958 if self.__algorithmName is not None:
959 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
960 daDirectory = "daAlgorithms"
962 # Recherche explicitement le fichier complet
963 # ------------------------------------------
965 for directory in sys.path:
966 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
967 module_path = os.path.abspath(os.path.join(directory, daDirectory))
968 if module_path is None:
969 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
971 # Importe le fichier complet comme un module
972 # ------------------------------------------
974 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
975 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
976 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
977 raise ImportError("this module does not define a valid elementary algorithm.")
978 self.__algorithmName = str(choice)
979 sys.path = sys_path_tmp ; del sys_path_tmp
980 except ImportError as e:
981 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
983 # Instancie un objet du type élémentaire du fichier
984 # -------------------------------------------------
985 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
988 def __shape_validate(self):
990 Validation de la correspondance correcte des tailles des variables et
991 des matrices s'il y en a.
993 if self.__Xb is None: __Xb_shape = (0,)
994 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
995 elif hasattr(self.__Xb,"shape"):
996 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
997 else: __Xb_shape = self.__Xb.shape()
998 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1000 if self.__Y is None: __Y_shape = (0,)
1001 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1002 elif hasattr(self.__Y,"shape"):
1003 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1004 else: __Y_shape = self.__Y.shape()
1005 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1007 if self.__U is None: __U_shape = (0,)
1008 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1009 elif hasattr(self.__U,"shape"):
1010 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1011 else: __U_shape = self.__U.shape()
1012 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1014 if self.__B is None: __B_shape = (0,0)
1015 elif hasattr(self.__B,"shape"):
1016 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1017 else: __B_shape = self.__B.shape()
1018 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1020 if self.__R is None: __R_shape = (0,0)
1021 elif hasattr(self.__R,"shape"):
1022 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1023 else: __R_shape = self.__R.shape()
1024 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1026 if self.__Q is None: __Q_shape = (0,0)
1027 elif hasattr(self.__Q,"shape"):
1028 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1029 else: __Q_shape = self.__Q.shape()
1030 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1032 if len(self.__HO) == 0: __HO_shape = (0,0)
1033 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1034 elif hasattr(self.__HO["Direct"],"shape"):
1035 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1036 else: __HO_shape = self.__HO["Direct"].shape()
1037 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1039 if len(self.__EM) == 0: __EM_shape = (0,0)
1040 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1041 elif hasattr(self.__EM["Direct"],"shape"):
1042 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1043 else: __EM_shape = self.__EM["Direct"].shape()
1044 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1046 if len(self.__CM) == 0: __CM_shape = (0,0)
1047 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1048 elif hasattr(self.__CM["Direct"],"shape"):
1049 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1050 else: __CM_shape = self.__CM["Direct"].shape()
1051 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1053 # Vérification des conditions
1054 # ---------------------------
1055 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1056 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1057 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1058 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1060 if not( min(__B_shape) == max(__B_shape) ):
1061 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1062 if not( min(__R_shape) == max(__R_shape) ):
1063 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1064 if not( min(__Q_shape) == max(__Q_shape) ):
1065 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1066 if not( min(__EM_shape) == max(__EM_shape) ):
1067 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1069 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1070 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1071 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1072 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1073 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1074 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1075 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1076 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
1078 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1079 if self.__algorithmName in ["EnsembleBlue",]:
1080 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1081 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1082 for member in asPersistentVector:
1083 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1084 __Xb_shape = min(__B_shape)
1086 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1088 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1089 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1091 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1092 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1094 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1095 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1097 if ("Bounds" in self.__P) \
1098 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1099 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1100 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1101 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1105 # ==============================================================================
1106 class RegulationAndParameters(object):
1108 Classe générale d'interface d'action pour la régulation et ses paramètres
1111 name = "GenericRegulation",
1118 self.__name = str(name)
1121 if asAlgorithm is None and asScript is not None:
1122 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1124 __Algo = asAlgorithm
1126 if asDict is None and asScript is not None:
1127 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1131 if __Dict is not None:
1132 self.__P.update( dict(__Dict) )
1134 if __Algo is not None:
1135 self.__P.update( {"Algorithm":self.__A} )
1137 def get(self, key = None):
1138 "Vérifie l'existence d'une clé de variable ou de paramètres"
1140 return self.__P[key]
1144 # ==============================================================================
1145 class DataObserver(object):
1147 Classe générale d'interface de type observer
1150 name = "GenericObserver",
1162 self.__name = str(name)
1167 if onVariable is None:
1168 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1169 elif type(onVariable) in (tuple, list):
1170 self.__V = tuple(map( str, onVariable ))
1171 if withInfo is None:
1174 self.__I = (str(withInfo),)*len(self.__V)
1175 elif isinstance(onVariable, str):
1176 self.__V = (onVariable,)
1177 if withInfo is None:
1178 self.__I = (onVariable,)
1180 self.__I = (str(withInfo),)
1182 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
1184 if asString is not None:
1185 __FunctionText = asString
1186 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1187 __FunctionText = Templates.ObserverTemplates[asTemplate]
1188 elif asScript is not None:
1189 __FunctionText = ImportFromScript(asScript).getstring()
1192 __Function = ObserverF(__FunctionText)
1194 if asObsObject is not None:
1195 self.__O = asObsObject
1197 self.__O = __Function.getfunc()
1199 for k in range(len(self.__V)):
1202 if ename not in withAlgo:
1203 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1205 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1208 "x.__repr__() <==> repr(x)"
1209 return repr(self.__V)+"\n"+repr(self.__O)
1212 "x.__str__() <==> str(x)"
1213 return str(self.__V)+"\n"+str(self.__O)
1215 # ==============================================================================
1216 class State(object):
1218 Classe générale d'interface de type état
1221 name = "GenericVector",
1223 asPersistentVector = None,
1229 toBeChecked = False,
1232 Permet de définir un vecteur :
1233 - asVector : entrée des données, comme un vecteur compatible avec le
1234 constructeur de numpy.matrix, ou "True" si entrée par script.
1235 - asPersistentVector : entrée des données, comme une série de vecteurs
1236 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1237 type Persistence, ou "True" si entrée par script.
1238 - asScript : si un script valide est donné contenant une variable
1239 nommée "name", la variable est de type "asVector" (par défaut) ou
1240 "asPersistentVector" selon que l'une de ces variables est placée à
1242 - asDataFile : si un ou plusieurs fichiers valides sont donnés
1243 contenant des valeurs en colonnes, elles-mêmes nommées "colNames"
1244 (s'il n'y a pas de nom de colonne indiquée, on cherche une colonne
1245 nommée "name"), on récupère les colonnes et on les range ligne après
1246 ligne (colMajor=False) ou colonne après colonne (colMajor=True). La
1247 variable résultante est de type "asVector" (par défaut) ou
1248 "asPersistentVector" selon que l'une de ces variables est placée à
1251 self.__name = str(name)
1252 self.__check = bool(toBeChecked)
1256 self.__is_vector = False
1257 self.__is_series = False
1259 if asScript is not None:
1260 __Vector, __Series = None, None
1261 if asPersistentVector:
1262 __Series = ImportFromScript(asScript).getvalue( self.__name )
1264 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1265 elif asDataFile is not None:
1266 __Vector, __Series = None, None
1267 if asPersistentVector:
1268 if colNames is not None:
1269 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1271 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1272 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1273 __Series = numpy.transpose(__Series)
1274 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1275 __Series = numpy.transpose(__Series)
1277 if colNames is not None:
1278 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1280 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1282 __Vector = numpy.ravel(__Vector, order = "F")
1284 __Vector = numpy.ravel(__Vector, order = "C")
1286 __Vector, __Series = asVector, asPersistentVector
1288 if __Vector is not None:
1289 self.__is_vector = True
1290 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1291 self.shape = self.__V.shape
1292 self.size = self.__V.size
1293 elif __Series is not None:
1294 self.__is_series = True
1295 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1296 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1297 if isinstance(__Series, str): __Series = eval(__Series)
1298 for member in __Series:
1299 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1302 if isinstance(self.__V.shape, (tuple, list)):
1303 self.shape = self.__V.shape
1305 self.shape = self.__V.shape()
1306 if len(self.shape) == 1:
1307 self.shape = (self.shape[0],1)
1308 self.size = self.shape[0] * self.shape[1]
1310 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1312 if scheduledBy is not None:
1313 self.__T = scheduledBy
1315 def getO(self, withScheduler=False):
1317 return self.__V, self.__T
1318 elif self.__T is None:
1324 "Vérification du type interne"
1325 return self.__is_vector
1328 "Vérification du type interne"
1329 return self.__is_series
1332 "x.__repr__() <==> repr(x)"
1333 return repr(self.__V)
1336 "x.__str__() <==> str(x)"
1337 return str(self.__V)
1339 # ==============================================================================
1340 class Covariance(object):
1342 Classe générale d'interface de type covariance
1345 name = "GenericCovariance",
1346 asCovariance = None,
1347 asEyeByScalar = None,
1348 asEyeByVector = None,
1351 toBeChecked = False,
1354 Permet de définir une covariance :
1355 - asCovariance : entrée des données, comme une matrice compatible avec
1356 le constructeur de numpy.matrix
1357 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1358 multiplicatif d'une matrice de corrélation identité, aucune matrice
1359 n'étant donc explicitement à donner
1360 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1361 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1362 n'étant donc explicitement à donner
1363 - asCovObject : entrée des données comme un objet python, qui a les
1364 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1365 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1366 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1367 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1368 pleine doit être vérifié
1370 self.__name = str(name)
1371 self.__check = bool(toBeChecked)
1374 self.__is_scalar = False
1375 self.__is_vector = False
1376 self.__is_matrix = False
1377 self.__is_object = False
1379 if asScript is not None:
1380 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1382 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1384 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1386 __Object = ImportFromScript(asScript).getvalue( self.__name )
1388 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1390 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1392 if __Scalar is not None:
1393 if numpy.matrix(__Scalar).size != 1:
1394 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1395 self.__is_scalar = True
1396 self.__C = numpy.abs( float(__Scalar) )
1399 elif __Vector is not None:
1400 self.__is_vector = True
1401 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1402 self.shape = (self.__C.size,self.__C.size)
1403 self.size = self.__C.size**2
1404 elif __Matrix is not None:
1405 self.__is_matrix = True
1406 self.__C = numpy.matrix( __Matrix, float )
1407 self.shape = self.__C.shape
1408 self.size = self.__C.size
1409 elif __Object is not None:
1410 self.__is_object = True
1412 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1413 if not hasattr(self.__C,at):
1414 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1415 if hasattr(self.__C,"shape"):
1416 self.shape = self.__C.shape
1419 if hasattr(self.__C,"size"):
1420 self.size = self.__C.size
1425 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1429 def __validate(self):
1431 if self.__C is None:
1432 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1433 if self.ismatrix() and min(self.shape) != max(self.shape):
1434 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1435 if self.isobject() and min(self.shape) != max(self.shape):
1436 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1437 if self.isscalar() and self.__C <= 0:
1438 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1439 if self.isvector() and (self.__C <= 0).any():
1440 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1441 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1443 L = numpy.linalg.cholesky( self.__C )
1445 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1446 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1448 L = self.__C.cholesky()
1450 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1453 "Vérification du type interne"
1454 return self.__is_scalar
1457 "Vérification du type interne"
1458 return self.__is_vector
1461 "Vérification du type interne"
1462 return self.__is_matrix
1465 "Vérification du type interne"
1466 return self.__is_object
1471 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1472 elif self.isvector():
1473 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1474 elif self.isscalar():
1475 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1476 elif self.isobject():
1477 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1479 return None # Indispensable
1484 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1485 elif self.isvector():
1486 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1487 elif self.isscalar():
1488 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1489 elif self.isobject():
1490 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1493 "Décomposition de Cholesky"
1495 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1496 elif self.isvector():
1497 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1498 elif self.isscalar():
1499 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1500 elif self.isobject() and hasattr(self.__C,"cholesky"):
1501 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1503 def choleskyI(self):
1504 "Inversion de la décomposition de Cholesky"
1506 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1507 elif self.isvector():
1508 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1509 elif self.isscalar():
1510 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1511 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1512 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1514 def diag(self, msize=None):
1515 "Diagonale de la matrice"
1517 return numpy.diag(self.__C)
1518 elif self.isvector():
1520 elif self.isscalar():
1522 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1524 return self.__C * numpy.ones(int(msize))
1525 elif self.isobject():
1526 return self.__C.diag()
1528 def asfullmatrix(self, msize=None):
1532 elif self.isvector():
1533 return numpy.matrix( numpy.diag(self.__C), float )
1534 elif self.isscalar():
1536 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1538 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1539 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1540 return self.__C.asfullmatrix()
1542 def trace(self, msize=None):
1543 "Trace de la matrice"
1545 return numpy.trace(self.__C)
1546 elif self.isvector():
1547 return float(numpy.sum(self.__C))
1548 elif self.isscalar():
1550 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1552 return self.__C * int(msize)
1553 elif self.isobject():
1554 return self.__C.trace()
1560 "x.__repr__() <==> repr(x)"
1561 return repr(self.__C)
1564 "x.__str__() <==> str(x)"
1565 return str(self.__C)
1567 def __add__(self, other):
1568 "x.__add__(y) <==> x+y"
1569 if self.ismatrix() or self.isobject():
1570 return self.__C + numpy.asmatrix(other)
1571 elif self.isvector() or self.isscalar():
1572 _A = numpy.asarray(other)
1573 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1574 return numpy.asmatrix(_A)
1576 def __radd__(self, other):
1577 "x.__radd__(y) <==> y+x"
1578 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1580 def __sub__(self, other):
1581 "x.__sub__(y) <==> x-y"
1582 if self.ismatrix() or self.isobject():
1583 return self.__C - numpy.asmatrix(other)
1584 elif self.isvector() or self.isscalar():
1585 _A = numpy.asarray(other)
1586 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1587 return numpy.asmatrix(_A)
1589 def __rsub__(self, other):
1590 "x.__rsub__(y) <==> y-x"
1591 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1594 "x.__neg__() <==> -x"
1597 def __mul__(self, other):
1598 "x.__mul__(y) <==> x*y"
1599 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1600 return self.__C * other
1601 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1602 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1603 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1604 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1605 return self.__C * numpy.asmatrix(other)
1607 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1608 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1609 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1610 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1611 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1612 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1614 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1615 elif self.isscalar() and isinstance(other,numpy.matrix):
1616 return self.__C * other
1617 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1618 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1619 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1621 return self.__C * numpy.asmatrix(other)
1622 elif self.isobject():
1623 return self.__C.__mul__(other)
1625 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1627 def __rmul__(self, other):
1628 "x.__rmul__(y) <==> y*x"
1629 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1630 return other * self.__C
1631 elif self.isvector() and isinstance(other,numpy.matrix):
1632 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1633 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1634 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1635 return numpy.asmatrix(numpy.array(other) * self.__C)
1637 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1638 elif self.isscalar() and isinstance(other,numpy.matrix):
1639 return other * self.__C
1640 elif self.isobject():
1641 return self.__C.__rmul__(other)
1643 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1646 "x.__len__() <==> len(x)"
1647 return self.shape[0]
1649 # ==============================================================================
1650 class ObserverF(object):
1652 Creation d'une fonction d'observateur a partir de son texte
1654 def __init__(self, corps=""):
1655 self.__corps = corps
1656 def func(self,var,info):
1657 "Fonction d'observation"
1660 "Restitution du pointeur de fonction dans l'objet"
1663 # ==============================================================================
class CaseLogger(object):
    """
    Keep a record of the commands used to build a case.

    Commands are accumulated as a serie of tuples and can be written out
    ("dump") or read back ("load") through the registered viewer/loader
    formats.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []     # ordered list of (command, keys, local, pre, switchoff)
        self.__switchoff = False  # when True, registration is suspended
        # Built-in output (dump) and input (load) formats.
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        # Optional user-supplied formats extend/override the built-in ones.
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        complete = (__command is not None) and (__keys is not None) and (__local is not None)
        if complete and not self.__switchoff:
            # "self" is an implementation detail of the caller, never logged.
            if "self" in __keys:
                __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            # A non-switching registration re-enables recording.
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized restitution of the recorded commands"
        if __format not in self.__viewers:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format not in self.__loaders:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        __formater = self.__loaders[__format]()
        return __formater.load(__filename, __content, __object)
1713 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding list of
    values of the function applied to each of them.

    Raises ValueError when the input is not iterable.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # Fixed typo in the user-facing message ("unkown" -> "unknown").
        raise ValueError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    # Sequential (mono-process) evaluation, one result per input value.
    __multiHX = [ _sFunction( __xvalue ) for __xvalue in __xserie ]
    return __multiHX
1728 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Already available simulation of Hm(x)
                   _arg = None,  # Extra arguments for Hm, given as a tuple
                   _BI  = None,  # Inverse of background error covariance
                   _RI  = None,  # Inverse of observation error covariance
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be removed for 8.0
                   _SSC = None,  # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = None,  # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the SSC
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the cost value J (or Hm(x) for "QR", or the storage dict when
    _fRt is True). Raises ValueError on missing operator/covariances or on
    an unknown quality measure.
    """
    # Bug fix: use None sentinels instead of mutable default arguments so
    # the storage list/dict are NOT shared between successive calls that
    # rely on the defaults.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every storage slot exists and exposes an "append" method.
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # Use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Simulation: reuse a precomputed Hm(x) when available, else call Hm.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates NaN into the cost, not an exception.
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
        if "IndexOfOptimum" in _SSC or \
           "CurrentOptimum" in _SSC or \
           "SimulatedObservationAtCurrentOptimum" in _SSC:
            IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
        if "IndexOfOptimum" in _SSC:
            _SSV["IndexOfOptimum"].append( IndexMin )
        if "CurrentOptimum" in _SSC:
            _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
        if "SimulatedObservationAtCurrentOptimum" in _SSC:
            _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1843 # ==============================================================================
# Minimal self-test entry point: only prints a marker when the module is
# executed directly; the module content is meant to be imported.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')