1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from daCore import Persistence
35 from daCore import PlatformInfo
36 from daCore import Interfaces
37 from daCore import Templates
38 from daCore.Interfaces import ImportFromScript, ImportFromFile
40 # ==============================================================================
class CacheManager(object):
    """
    Generic management class for a cache of calculation results.

    For each evaluation point X, the cache stores the triple
    (ravelled X, H(X), ||X||) and can look a point up again within a
    relative tolerance, so that redundant operator evaluations are avoided.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                 ):
        """
        The tolerance characteristics can be modified at creation.

        Arguments:
        - toleranceInRedundancy : relative tolerance under which two points
          are considered identical
        - lenghtOfRedundancy : maximal number of stored points; a negative
          value means "auto-size on first storage"
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Remember the nominal length so that enable() can restore it after disable()
        self.__initlnOR = self.__lenghtOR
        self.clearCache()

    def clearCache(self):
        "Empty the cache of previously calculated points"
        # Operator Previous Calculated Points, Results, Point Norms
        self.__listOPCV = []

    def wasCalculatedIn(self, xValue ):
        "Check the existence of a calculation corresponding to the value"
        __alreadyCalculated = False
        __HxV = None
        # Scan backwards so that the most recently stored points are tested first
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # Points of different sizes can not match: skip the comparison
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alreadyCalculated = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a calculation corresponding to the value"
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use, proportionally to the point size
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entries to respect the maximal length
            self.__listOPCV.pop(0)
        # Copies are stored so that later in-place changes by the caller can
        # not corrupt the cache content
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Deactivate the cache by forcing its length to zero"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Reactivate the cache with its previous length"
        self.__lenghtOR = self.__initlnOR
100 # ==============================================================================
class Operator(object):
    """
    Generic interface class for a simple operator, usable either in matrix
    form or in function form.
    """
    # Global (class-wide) evaluation counters, shared by all operators; the
    # visible methods (nbcalls, __addOne*Call) read and write them, so they
    # must exist at class level.
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Class-wide cache of already calculated points, shared by all operators
    CM = CacheManager()

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build an operator object by providing one of the two keywords,
        either a Python function or a matrix:
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether to avoid redundant calculations
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        # Both __Method and __Matrix are always set, because appliedTo and
        # the other application methods test "self.__Matrix is not None".
        if fromMethod is not None:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # "float" instead of the deprecated "numpy.float" alias (removed in NumPy 1.24)
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None
134 def disableAvoidingRedundancy(self):
136 Operator.CM.disable()
138 def enableAvoidingRedundancy(self):
143 Operator.CM.disable()
149 def appliedTo(self, xValue, HValue = None):
151 Permet de restituer le résultat de l'application de l'opérateur à un
152 argument xValue. Cette méthode se contente d'appliquer, son argument
153 devant a priori être du bon type.
155 - xValue : argument adapté pour appliquer l'opérateur
157 if HValue is not None:
158 HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
160 Operator.CM.storeValueInX(xValue,HxValue)
163 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
165 __alreadyCalculated = False
167 if __alreadyCalculated:
168 self.__addOneCacheCall()
171 if self.__Matrix is not None:
172 self.__addOneMatrixCall()
173 HxValue = self.__Matrix * xValue
175 self.__addOneMethodCall()
176 HxValue = self.__Method( xValue )
178 Operator.CM.storeValueInX(xValue,HxValue)
182 def appliedControledFormTo(self, paire ):
184 Permet de restituer le résultat de l'application de l'opérateur à une
185 paire (xValue, uValue). Cette méthode se contente d'appliquer, son
186 argument devant a priori être du bon type. Si la uValue est None,
187 on suppose que l'opérateur ne s'applique qu'à xValue.
189 - xValue : argument X adapté pour appliquer l'opérateur
190 - uValue : argument U adapté pour appliquer l'opérateur
192 assert len(paire) == 2, "Incorrect number of arguments"
193 xValue, uValue = paire
194 if self.__Matrix is not None:
195 self.__addOneMatrixCall()
196 return self.__Matrix * xValue
197 elif uValue is not None:
198 self.__addOneMethodCall()
199 return self.__Method( (xValue, uValue) )
201 self.__addOneMethodCall()
202 return self.__Method( xValue )
204 def appliedInXTo(self, paire ):
206 Permet de restituer le résultat de l'application de l'opérateur à un
207 argument xValue, sachant que l'opérateur est valable en xNominal.
208 Cette méthode se contente d'appliquer, son argument devant a priori
209 être du bon type. Si l'opérateur est linéaire car c'est une matrice,
210 alors il est valable en tout point nominal et il n'est pas nécessaire
212 Arguments : une liste contenant
213 - xNominal : argument permettant de donner le point où l'opérateur
214 est construit pour etre ensuite appliqué
215 - xValue : argument adapté pour appliquer l'opérateur
217 assert len(paire) == 2, "Incorrect number of arguments"
218 xNominal, xValue = paire
219 if self.__Matrix is not None:
220 self.__addOneMatrixCall()
221 return self.__Matrix * xValue
223 self.__addOneMethodCall()
224 return self.__Method( (xNominal, xValue) )
226 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
228 Permet de renvoyer l'opérateur sous la forme d'une matrice
230 if self.__Matrix is not None:
231 self.__addOneMatrixCall()
233 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
234 self.__addOneMethodCall()
235 return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
237 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
241 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
242 la forme d'une matrice
244 if self.__Matrix is not None:
245 return self.__Matrix.shape
247 raise ValueError("Matrix form of the operator is not available, nor the shape")
249 def nbcalls(self, which=None):
251 Renvoie les nombres d'évaluations de l'opérateur
254 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
255 self.__NbCallsAsMatrix,
256 self.__NbCallsAsMethod,
257 self.__NbCallsOfCached,
258 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
259 Operator.NbCallsAsMatrix,
260 Operator.NbCallsAsMethod,
261 Operator.NbCallsOfCached,
263 if which is None: return __nbcalls
264 else: return __nbcalls[which]
266 def __addOneMatrixCall(self):
267 "Comptabilise un appel"
268 self.__NbCallsAsMatrix += 1 # Decompte local
269 Operator.NbCallsAsMatrix += 1 # Decompte global
271 def __addOneMethodCall(self):
272 "Comptabilise un appel"
273 self.__NbCallsAsMethod += 1 # Decompte local
274 Operator.NbCallsAsMethod += 1 # Decompte global
276 def __addOneCacheCall(self):
277 "Comptabilise un appel"
278 self.__NbCallsOfCached += 1 # Decompte local
279 Operator.NbCallsOfCached += 1 # Decompte global
281 # ==============================================================================
class FullOperator(object):
    """
    Generic interface class for a full operator
    (direct, tangent-linear and adjoint forms).
    """
    # NOTE(review): this file is a fragmentary extract — in this class the
    # "def __init__(self," line, several keyword parameters, some "else:"
    # branches and closing brackets are missing from view. Only documentation
    # is added below; the visible code is kept unchanged.
    #
    # Visible constructor keyword parameters:
                 name             = "GenericFullOperator",
                 asOneFunction    = None, # a single direct function
                 asThreeFunctions = None, # dictionary of three functions
                 asScript         = None, # script file defining the function(s)
                 asDict           = None, # parameters dictionary
        self.__name       = str(name)
        self.__check      = bool(toBeChecked)
        #
        # Collect the finite-difference / multiprocessing options given in asDict
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"]  = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"]  = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"]  = asDict["NumberOfProcesses"]
        #
        # Case 1: the operator is given through a script file
        if asScript is not None:
            __Matrix, __Function = None, None
            # NOTE(review): the "if asMatrix:" / "elif asOneFunction:" guard
            # lines are missing from the extract around here.
            __Matrix = ImportFromScript(asScript).getvalue( self.__name )
            __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
            __Function.update({"useApproximatedDerivatives":True})
            __Function.update(__Parameters)
            elif asThreeFunctions:
                # NOTE(review): the "__Function = {" opener and "}" closer of
                # this dictionary literal are missing from the extract.
                "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
        # Case 2: the operator(s) are given directly as Python objects
        if asOneFunction is not None:
            if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                if asOneFunction["Direct"] is not None:
                    __Function = asOneFunction
                # NOTE(review): "else:" line missing in the extract before this raise
                raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
            # Fallback: asOneFunction is a plain callable
            __Function = { "Direct":asOneFunction }
            __Function.update({"useApproximatedDerivatives":True})
            __Function.update(__Parameters)
        elif asThreeFunctions is not None:
            if isinstance(asThreeFunctions, dict) and \
                ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                __Function = asThreeFunctions
            elif isinstance(asThreeFunctions, dict) and \
                ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                __Function = asThreeFunctions
                __Function.update({"useApproximatedDerivatives":True})
            # NOTE(review): "else:" line missing in the extract before this raise
            raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
            if "Direct" not in asThreeFunctions:
                # When only Tangent/Adjoint are given, the tangent serves as direct
                __Function["Direct"] = asThreeFunctions["Tangent"]
            __Function.update(__Parameters)
        #
        # Commented-out Python 2 compatibility check, kept as in the original:
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        # Normalize the "appliedInX" argument into a dictionary
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        # NOTE(review): "else: __appliedInX = None" lines missing in the extract
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        # Build the three Operator objects, by finite differences when only a
        # direct function with approximated derivatives is available
        if isinstance(__Function, dict) and \
            ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
            ("Direct" in __Function) and (__Function["Direct"] is not None):
            # Default values of the finite-difference options
            if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
            if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
            if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function              = __Function["Direct"],
                centeredDF            = __Function["withCenteredDF"],
                increment             = __Function["withIncrement"],
                dX                    = __Function["withdX"],
                avoidingRedundancy    = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy    = __Function["withLenghtOfRedundancy"],
                mpEnabled             = __Function["withmpEnabled"],
                mpWorkers             = __Function["withmpWorkers"],
            # NOTE(review): the closing ")" line of this call is missing in the extract
            self.__FO["Direct"]  = Operator( fromMethod = FDA.DirectOperator,  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
        elif isinstance(__Function, dict) and \
            ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
            (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"]  = Operator( fromMethod = __Function["Direct"],  avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
        elif asMatrix is not None:
            # NOTE(review): "numpy.float" is deprecated/removed in recent NumPy
            # and should become plain "float" (code kept unchanged here).
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"]  = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice,   avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
        # NOTE(review): "else:" line missing in the extract before this raise
        raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        # Normalize every "applied in X" value into a column numpy.matrix
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            if not isinstance(__appliedInX, dict):
                raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector represented as a 2D array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                # NOTE(review): "else:" line missing in the extract
                self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
        # NOTE(review): "else:" line missing in the extract
        self.__FO["AppliedInX"] = None

    # NOTE(review): the "def __repr__(self):" line is missing in the extract
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
    # NOTE(review): the "def __str__(self):" line is missing in the extract
        "x.__str__() <==> str(x)"
446 # ==============================================================================
class Algorithm(object):
    """
    Generic interface class for assimilation algorithms.

    It provides a framework for writing an elementary assimilation algorithm
    class, supplying a container (dictionary) of initialized persistent
    variables, and access methods to these stored variables.

    An elementary algorithm class has to implement the "run" method.
    """
    def __init__(self, name):
        """
        Build generic storage variables usable by the elementary algorithms.
        The storage variables are kept in a dictionary internal to the
        object, accessed through the "get" method.

        The provided variables are:
        - CostFunctionJ  : global cost function, sum of the two following parts
        - CostFunctionJb : background part of the cost function
        - CostFunctionJo : observation part of the cost function
        - GradientOfCostFunctionJ  : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part
        - GradientOfCostFunctionJo : gradient of the observation part
        - CurrentState : current state during iterations
        - Analysis : the analysis Xa
        - SimulatedObservationAtBackground   : observed state H(Xb) at background
        - SimulatedObservationAtCurrentState : observed state H(X) at current state
        - SimulatedObservationAtOptimum      : observed state H(Xa) at optimum
        - Innovation : the innovation: d = Y - H(X)
        - InnovationAtCurrentState : innovation at current state: dn = Y - H(Xn)
        - SigmaObs2 : optimal correction indicator of the observation errors
        - SigmaBck2 : optimal correction indicator of the background errors
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - AMB : Analysis minus Background : Xa - Xb
        - APosterioriCovariance : A matrix
        - APosterioriVariances  : variances of the A matrix
        - APosterioriStandardDeviations : standard deviations of the A matrix
        - APosterioriCorrelations : correlations of the A matrix
        - Residu : for the checking algorithms
        Additional variables to store can be added in the initialization of
        the elementary algorithm that inherits from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()
        # NOTE(review): one or two original lines are missing around here in
        # this extract.
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        #
        # One persistence container per known variable name
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
        """
        Common pre-processing for all algorithms: store the parameters,
        check the inputs against the declared requirements, and set the
        verbosity options of the underlying optimizers.
        """
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        #
        # Update self._parameters with Parameters
        self.__setParameters(Parameters)
        #
        # Corrections and complements
        def __test_vvalue( argument, variable, argname):
            "Check a vector-like input against the declared requirements"
            # NOTE(review): in this extract the "if argument is None:" guard
            # and the "else:" branch lines are missing; as written in the
            # original, the raise/debug below depend on whether the argument
            # is set — confirm against the full source.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
            logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
            logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        def __test_cvalue( argument, variable, argname):
            "Check a covariance-matrix input against the declared requirements"
            # NOTE(review): same missing guard lines as in __test_vvalue above.
            if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
            elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
            logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
            logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        #
        # Bounds are kept only when given as a non-empty list/tuple
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
        # NOTE(review): "else:" line missing in the extract
        self._parameters["Bounds"] = None
        #
        # The optimizer verbosity follows the logging level
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
            # NOTE(review): "else:" line missing in the extract
            self._parameters["optmessages"] = 15
        # NOTE(review): "else:" line missing in the extract (quiet settings below)
        self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
        if PlatformInfo.has_scipy:
            import scipy.optimize
            self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
        # NOTE(review): "else:" line missing in the extract
        self._parameters["optmessages"] = 15
    def _post_run(self,_oH=None):
        """
        Common post-processing for all algorithms: derive the a-posteriori
        diagnostics from the stored covariance and log the operator call
        counters.
        """
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    # Normalize the covariance by the standard deviations to
                    # obtain the correlation matrix
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
        # NOTE(review): a guard line is missing here in the extract
        # (presumably "if _oH is not None:" — confirm against the full source)
        logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
        logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)
610 def get(self, key=None):
612 Renvoie l'une des variables stockées identifiée par la clé, ou le
613 dictionnaire de l'ensemble des variables disponibles en l'absence de
614 clé. Ce sont directement les variables sous forme objet qui sont
615 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
616 des classes de persistance.
619 return self.StoredVariables[key]
621 return self.StoredVariables
623 def __contains__(self, key=None):
624 "D.__contains__(k) -> True if D has a key k, else False"
625 return key in self.StoredVariables
628 "D.keys() -> list of D's keys"
629 if hasattr(self, "StoredVariables"):
630 return self.StoredVariables.keys()
    # NOTE(review): the "def pop(self, k, d):" line is missing in this
    # extract. The method mimics dict.pop on the StoredVariables container,
    # with a fallback branch (also partly missing below) used when the
    # container does not exist yet, e.g. during unpickling.
        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
        # NOTE(review): the original fallback lines between the branch above
        # and this raise are missing in the extract.
            raise TypeError("pop expected at least 1 arguments, got 0")
        "If key is not found, d is returned if given, otherwise KeyError is raised"
    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Must implement the elementary assimilation calculation in its most
        natural mathematical form. To be overridden by every concrete
        elementary algorithm.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare a required parameter of the algorithm together with its
        default characteristics: default value, typecast, bounds, allowed
        values, and a user message.
        """
        # NOTE(review): the "if name is None:" guard line is missing from the
        # extract just before this raise.
            raise ValueError("A name is mandatory to define a required parameter.")
        #
        self.__required_parameters[name] = {
            # NOTE(review): the "default" entry line is missing in the extract
            "typecast" : typecast,
            # NOTE(review): the "minval"/"maxval"/"listval"/"message" entry
            # lines and the dict closer are missing in the extract
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
674 def getRequiredParameters(self, noDetails=True):
676 Renvoie la liste des noms de paramètres requis ou directement le
677 dictionnaire des paramètres requis.
680 return sorted(self.__required_parameters.keys())
682 return self.__required_parameters
684 def setParameterValue(self, name=None, value=None):
686 Renvoie la valeur d'un paramètre requis de manière contrôlée
688 default = self.__required_parameters[name]["default"]
689 typecast = self.__required_parameters[name]["typecast"]
690 minval = self.__required_parameters[name]["minval"]
691 maxval = self.__required_parameters[name]["maxval"]
692 listval = self.__required_parameters[name]["listval"]
694 if value is None and default is None:
696 elif value is None and default is not None:
697 if typecast is None: __val = default
698 else: __val = typecast( default )
700 if typecast is None: __val = value
701 else: __val = typecast( value )
703 if minval is not None and (numpy.array(__val, float) < minval).any():
704 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
705 if maxval is not None and (numpy.array(__val, float) > maxval).any():
706 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
707 if listval is not None:
708 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
711 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
712 elif __val not in listval:
713 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Declare which input arguments are mandatory and which are optional
        for this algorithm; used by the input checks of _pre_run.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
723 def __setParameters(self, fromDico={}):
725 Permet de stocker les paramètres reçus dans le dictionnaire interne.
727 self._parameters.update( fromDico )
728 for k in self.__required_parameters.keys():
729 if k in fromDico.keys():
730 self._parameters[k] = self.setParameterValue(k,fromDico[k])
732 self._parameters[k] = self.setParameterValue(k)
733 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
735 # ==============================================================================
class AlgorithmAndParameters(object):
    """
    Generic interface class of action for the algorithm and its parameters.
    """
    # NOTE(review): fragmentary extract — the "def __init__(self," line,
    # several keyword parameters and some "else:" branches are missing from
    # view; only documentation is added here.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        #
        self.updateParameters( asDict, asScript )
        #
        # Retrieve the algorithm name, either from a script or directly
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): "else: __Algo = asAlgorithm" lines missing in the extract
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
            # Load and configure the algorithm object itself
            self.__setAlgorithm( self.__A )
    def updateParameters(self,
        # NOTE(review): the remaining signature lines (presumably
        # "asDict = None, asScript = None,") are missing in the extract
        "Update the parameters"
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): "else: __Dict = asDict" lines missing in the extract
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
    def executePythonScheme(self, asDictAO = None):
        "Run the assimilation calculation in pure Python"
        Operator.CM.clearCache()
        # NOTE(review): a line is missing here in the extract
        if not isinstance(asDictAO, dict):
            raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
        # Unwrap every input object (through its "getO" accessor when present)
        # into the plain attributes used by the algorithm run
        if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
        elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
        else: self.__Xb = None
        if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
        else: self.__Y = asDictAO["Observation"]
        if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
        else: self.__U = asDictAO["ControlInput"]
        if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
        else: self.__HO = asDictAO["ObservationOperator"]
        if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
        else: self.__EM = asDictAO["EvolutionModel"]
        if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
        else: self.__CM = asDictAO["ControlModel"]
        self.__B = asDictAO["BackgroundError"]
        self.__R = asDictAO["ObservationError"]
        self.__Q = asDictAO["EvolutionError"]
        #
        self.__shape_validate()
        #
        self.__algorithm.run(
            # NOTE(review): the keyword-argument lines (Xb/Y/U/HO/EM/CM/R/B/Q)
            # and the closing ")" of this call are missing in the extract
            Parameters = self.__P,
    def executeYACSScheme(self, FileName=None):
        "Launch the assimilation calculation through a YACS XML scheme"
        # The YACS scheme file must exist on disk.
        if FileName is None or not os.path.exists(FileName):
            raise ValueError("a YACS file name has to be given for YACS execution.\n")
        __file = os.path.abspath(FileName)
        logging.debug("The YACS file name is \"%s\"."%__file)
        # Execution requires the full SALOME/YACS/ADAO environment.
        if not PlatformInfo.has_salome or \
            not PlatformInfo.has_yacs or \
            not PlatformInfo.has_adao:
            raise ImportError("\n\n"+\
                "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
                "Please load the right environnement before trying to use it.\n")
        # NOTE(review): the imports of pilot/SALOMERuntime/loader are elided
        # in this excerpt; they presumably sit just above this call.
        SALOMERuntime.RuntimeSALOME_setRuntime()
        r = pilot.getRuntime()
        xmlLoader = loader.YACSLoader()
        xmlLoader.registerProcCataLoader()
        # Register the scheme's own catalog so its nodes can be resolved.
        catalogAd = r.loadCatalog("proc", __file)
        r.addCatalog(catalogAd)
        # NOTE(review): a "try:" line is elided before the load() call below.
        p = xmlLoader.load(__file)
        except IOError as ex:
            print("The YACS XML schema file can not be loaded: %s"%(ex,))
        # Report parser diagnostics before validation.
        logger = p.getLogger("parser")
        if not logger.isEmpty():
            print("The imported YACS XML schema has errors on parsing:")
            print(logger.getStr())
        # NOTE(review): an "if not p.isValid():" guard appears to be elided
        # before the two lines below.
            print("The YACS XML schema is not valid and will not be executed:")
            print(p.getErrorReport())
        # Consistency check of the dataflow links (non-stopping mode).
        info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
        p.checkConsistency(info)
        if info.areWarningsOrErrors():
            print("The YACS XML schema is not coherent and will not be executed:")
            print(info.getGlobalRepr())
        e = pilot.ExecutorSwig()
        # NOTE(review): the executor RunW()/execution call is elided here.
        if p.getEffectiveState() != pilot.DONE:
            print(p.getErrorReport())
    def get(self, key = None):
        "Check the existence of, and return, a stored variable or a parameter"
        # Stored algorithm variables take precedence over parameters.
        if key in self.__algorithm:
            return self.__algorithm.get( key )
        elif key in self.__P:
        # NOTE(review): the parameter-branch return and the no-key fallback
        # (lines 881-884 of the original) are elided in this excerpt.
        #
        # (fragment) pop(k, d): the "def pop(self, k, d):" line is elided.
        "Needed for pickling"
        return self.__algorithm.pop(k, d)
889 def getAlgorithmRequiredParameters(self, noDetails=True):
890 "Renvoie la liste des paramètres requis selon l'algorithme"
891 return self.__algorithm.getRequiredParameters(noDetails)
    def setObserver(self, __V, __O, __I, __S):
        # Attach the observer callable __O (with info __I and scheduler __S)
        # to the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be build before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            # NOTE(review): the other keyword arguments of setDataObserver
            # (the hook function and the scheduler) are elided in this excerpt.
            HookParameters = __I,
    def removeObserver(self, __V, __O, __A = False):
        # Detach the observer __O from the stored variable named __V
        # (__A presumably means "remove all" -- confirm against full file).
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
            raise ValueError("No observer can be removed before choosing an algorithm.")
        if __V not in self.__algorithm:
            raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            # NOTE(review): the arguments of removeDataObserver are elided
            # in this excerpt.
    def hasObserver(self, __V):
        # True if an observer is attached to the stored variable named __V.
        if self.__algorithm is None \
            or isinstance(self.__algorithm, dict) \
            or not hasattr(self.__algorithm,"StoredVariables"):
        # NOTE(review): the early "return False" lines (original 924 and 926)
        # are elided in this excerpt.
        if __V not in self.__algorithm:
        return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
        #
        # (fragment) keys(): the "def keys(self):" line is elided; the result
        # is the union of stored variable names and parameter names.
        return list(self.__algorithm.keys()) + list(self.__P.keys())
932 def __contains__(self, key=None):
933 "D.__contains__(k) -> True if D has a key k, else False"
934 return key in self.__algorithm or key in self.__P
        # (fragment) __repr__: the "def __repr__(self):" line is elided.
        "x.__repr__() <==> repr(x)"
        return repr(self.__A)+", "+repr(self.__P)
        # (fragment) __str__: the "def __str__(self):" line is elided.
        "x.__str__() <==> str(x)"
        return str(self.__A)+", "+str(self.__P)
    def __setAlgorithm(self, choice = None ):
        """
        Select the algorithm used to carry out the assimilation study. The
        argument is a character string naming an algorithm that performs the
        operation on the fixed arguments.
        """
        # NOTE(review): the "if choice is None:" guard line is elided before
        # this raise in this excerpt.
        raise ValueError("Error: algorithm choice has to be given")
        if self.__algorithmName is not None:
            raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
        daDirectory = "daAlgorithms"
        #
        # Explicitly search for the complete module file
        # ----------------------------------------------
        # NOTE(review): the initial "module_path = None" line is elided here.
        for directory in sys.path:
            if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
                module_path = os.path.abspath(os.path.join(directory, daDirectory))
        if module_path is None:
            raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
        #
        # Import the complete file as a module
        # ------------------------------------
        # NOTE(review): a "try:" line is elided before this import sequence;
        # note also that sys.path is restored only on the success path.
        sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
        self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
        if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
            raise ImportError("this module does not define a valid elementary algorithm.")
        self.__algorithmName = str(choice)
        sys.path = sys_path_tmp ; del sys_path_tmp
        except ImportError as e:
            raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
        #
        # Instantiate an object of the file's elementary algorithm type
        # -------------------------------------------------------------
        self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    def __shape_validate(self):
        """
        Validate that the sizes of the variables and of the matrices, when
        present, are mutually consistent.
        """
        # Reduce each quantity to a shape tuple; "shape" may be either an
        # attribute (numpy objects) or a callable (Persistence-like objects).
        if self.__Xb is None: __Xb_shape = (0,)
        elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
        elif hasattr(self.__Xb,"shape"):
            if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
            else: __Xb_shape = self.__Xb.shape()
        else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
        #
        if self.__Y is None: __Y_shape = (0,)
        elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
        elif hasattr(self.__Y,"shape"):
            if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
            else: __Y_shape = self.__Y.shape()
        else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
        #
        if self.__U is None: __U_shape = (0,)
        elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
        elif hasattr(self.__U,"shape"):
            if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
            else: __U_shape = self.__U.shape()
        else: raise TypeError("The control (U) has no attribute of shape: problem !")
        #
        if self.__B is None: __B_shape = (0,0)
        elif hasattr(self.__B,"shape"):
            if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
            else: __B_shape = self.__B.shape()
        else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
        #
        if self.__R is None: __R_shape = (0,0)
        elif hasattr(self.__R,"shape"):
            if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
            else: __R_shape = self.__R.shape()
        else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
        #
        if self.__Q is None: __Q_shape = (0,0)
        elif hasattr(self.__Q,"shape"):
            if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
            else: __Q_shape = self.__Q.shape()
        else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
        #
        # Operators are dictionaries of forms; only the "Direct" form carries
        # a usable shape here.
        if len(self.__HO) == 0: __HO_shape = (0,0)
        elif isinstance(self.__HO, dict): __HO_shape = (0,0)
        elif hasattr(self.__HO["Direct"],"shape"):
            if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
            else: __HO_shape = self.__HO["Direct"].shape()
        else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
        #
        if len(self.__EM) == 0: __EM_shape = (0,0)
        elif isinstance(self.__EM, dict): __EM_shape = (0,0)
        elif hasattr(self.__EM["Direct"],"shape"):
            if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
            else: __EM_shape = self.__EM["Direct"].shape()
        else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
        #
        if len(self.__CM) == 0: __CM_shape = (0,0)
        elif isinstance(self.__CM, dict): __CM_shape = (0,0)
        elif hasattr(self.__CM["Direct"],"shape"):
            if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
            else: __CM_shape = self.__CM["Direct"].shape()
        else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
        #
        # Check the consistency conditions
        # --------------------------------
        if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
            raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
        if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
            raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
        #
        if not( min(__B_shape) == max(__B_shape) ):
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
        if not( min(__R_shape) == max(__R_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
        if not( min(__Q_shape) == max(__Q_shape) ):
            raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
        if not( min(__EM_shape) == max(__EM_shape) ):
            raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
        #
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
        if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
            raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
        #
        if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
            # Special case: EnsembleBlue accepts a background given as an
            # ensemble, reshaped here into a persistent series of vectors.
            if self.__algorithmName in ["EnsembleBlue",]:
                asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
                self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
                for member in asPersistentVector:
                    # NOTE(review): numpy.float was removed in NumPy >= 1.24
                    # (deprecated since 1.20); plain float is needed on modern
                    # NumPy, and numpy.matrix itself is deprecated.
                    self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
                __Xb_shape = min(__B_shape)
            # NOTE(review): the "else:" line introducing this raise is elided.
                raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
        #
        if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
            raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
        #
        if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
            raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
        #
        if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
            raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
        #
        # One bound pair is required per state component.
        if ("Bounds" in self.__P) \
            and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
            and (len(self.__P["Bounds"]) != max(__Xb_shape)):
            raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
                %(len(self.__P["Bounds"]),max(__Xb_shape)))
1099 # ==============================================================================
class RegulationAndParameters(object):
    """
    General interface class for regulation and its parameters.
    """
    # NOTE(review): the "def __init__(self," line and part of its signature
    # (asDict, asScript, asAlgorithm keywords) are elided in this excerpt.
            name               = "GenericRegulation",
        self.__name = str(name)
        # NOTE(review): initialisation of self.__P (and presumably self.__A)
        # is elided here.
        # Algorithm choice: explicit argument wins, else read from script.
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else:" line is elided before this assignment.
            __Algo = asAlgorithm
        # Parameters: explicit dict wins, else read from script.
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        if __Algo is not None:
            self.__P.update( {"Algorithm":self.__A} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # NOTE(review): an "if key in self.__P:" guard appears to be elided
        # before this return.
        return self.__P[key]
1138 # ==============================================================================
class DataObserver(object):
    """
    General interface class of observer type.
    """
    # NOTE(review): the "def __init__(self," line and most of its signature
    # (onVariable, withInfo, asString, asTemplate, asScript, asObsObject,
    # withAlgo, scheduledBy keywords) are elided in this excerpt.
            name        = "GenericObserver",
        self.__name = str(name)
        # Normalise the observed variable name(s) into tuples __V (names)
        # and __I (associated info strings).
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
            # NOTE(review): the default-info assignment and "else:" lines are
            # elided here.
                self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): "else:" elided.
                self.__I = (str(withInfo),)
        # NOTE(review): "else:" elided.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # Build the observer function body from, in order of precedence,
        # a raw string, a named template, or a user script.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        # NOTE(review): the fallback branch is elided before this line.
        __Function = ObserverF(__FunctionText)
        # A ready-made observer object bypasses the text-built function.
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): "else:" elided.
            self.__O = __Function.getfunc()
        # Register the observer on every requested variable of the algorithm.
        for k in range(len(self.__V)):
            # NOTE(review): the assignments of ename/einfo from self.__V and
            # self.__I are elided here.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): "else:" elided.
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # (fragment) __repr__: the "def __repr__(self):" line is elided.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)
    # (fragment) __str__: the "def __str__(self):" line is elided.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
1209 # ==============================================================================
class State(object):
    """
    General interface class of state type.

    A State can be defined from:
      - asVector : data input as a vector compatible with the numpy.matrix
        constructor, or "True" if the input comes from a script.
      - asPersistentVector : data input as a series of vectors compatible
        with the numpy.matrix constructor, or as a Persistence object, or
        "True" if the input comes from a script.
      - asScript : if a valid script is given containing a variable named
        "name", the variable is of type "asVector" (default) or
        "asPersistentVector" depending on which of those flags is set.
      - asDataFile : if one or more valid files are given containing values
        in columns, themselves named by "colNames" (if no column name is
        given, a column named "name" is searched for), the columns are read
        and stored row after row (colMajor=False) or column after column
        (colMajor=True). The resulting variable is of type "asVector"
        (default) or "asPersistentVector" depending on which flag is set.
      - toBeChecked : boolean requesting an internal validity check.
    """
    # NOTE(review): the "def __init__(self," line and part of its signature
    # (asVector, asScript, asDataFile, colNames, colMajor, scheduledBy) are
    # elided in this excerpt.
            name               = "GenericVector",
            asPersistentVector = None,
            toBeChecked        = False,
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): initialisation of self.__V and self.__T is elided.
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): "else:" elided.
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): "else:" elided.
                    __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz files come back already transposed, hence the format test.
                if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            # NOTE(review): "else:" (vector case) elided.
                if colNames is not None:
                    __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): "else:" elided.
                    __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # NOTE(review): the colMajor test lines around the two ravels
                # below are elided; "F" = column-major, "C" = row-major.
                    __Vector = numpy.ravel(__Vector, order = "F")
                    __Vector = numpy.ravel(__Vector, order = "C")
        # NOTE(review): "else:" (direct keyword input) elided.
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            # NOTE(review): numpy.float was removed in NumPy >= 1.24
            # (deprecated since 1.20); numpy.matrix is deprecated as well.
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval on a string input -- acceptable only for
                # trusted user scripts, as elsewhere in this API.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            # NOTE(review): looks like a leftover debug flush; sys is already
            # imported at module level.
            import sys ; sys.stdout.flush()
            # NOTE(review): the branch storing a ready-made Persistence object
            # is elided around here.
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): "else:" elided.
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): "else:" elided.
            raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        # Return the stored payload, optionally with the scheduler object.
        # NOTE(review): the "if withScheduler:" guard is elided before this
        # return, and the remaining branches after the elif are elided too.
            return self.__V, self.__T
        elif self.__T is None:

    # (fragment) isvector(): the "def isvector(self):" line is elided.
        "Internal type check"
        return self.__is_vector
    # (fragment) isseries(): the "def isseries(self):" line is elided.
        "Internal type check"
        return self.__is_series

    # (fragment) __repr__: the "def __repr__(self):" line is elided.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)
    # (fragment) __str__: the "def __str__(self):" line is elided.
        "x.__str__() <==> str(x)"
        return str(self.__V)
1334 # ==============================================================================
class Covariance(object):
    """
    General interface class of covariance type.

    A covariance can be defined from:
      - asCovariance : data input as a matrix compatible with the
        numpy.matrix constructor
      - asEyeByScalar : data input as a single variance scalar multiplying
        an identity correlation matrix, so no explicit matrix is given
      - asEyeByVector : data input as a single variance vector to be put on
        the diagonal of a correlation matrix, so no explicit matrix is given
      - asCovObject : data input as a Python object providing the mandatory
        methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
        "__neg__", "__mul__", "__rmul__" and the optional ones "shape",
        "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__",
        "__str__"
      - toBeChecked : boolean requesting a check that the full matrix is SPD
    """
    # NOTE(review): the "def __init__(self," line and part of its signature
    # (asCovObject, asScript keywords) are elided in this excerpt.
            name          = "GenericCovariance",
            asCovariance  = None,
            asEyeByScalar = None,
            asEyeByVector = None,
            toBeChecked   = False,
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): initialisation of self.__C is elided here.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        # Script input: read the named variable as whichever single form was
        # requested (the guard lines selecting each branch are elided here).
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
                __Scalar = ImportFromScript(asScript).getvalue( self.__name )
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
                __Object = ImportFromScript(asScript).getvalue( self.__name )
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): "else:" (direct keyword input) elided.
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        # Store the covariance in its most compact form and record its kind.
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # NOTE(review): the shape/size assignments for the scalar case
            # are elided here.
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # NOTE(review): the "self.__C = asCovObject" assignment line is
            # elided before this capability check.
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            # NOTE(review): the else-branch default shape is elided.
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            # NOTE(review): the else-branch default size is elided.
        # NOTE(review): this strict rejection was deliberately left disabled
        # in the original source -- presumably to allow late definition.
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
    def __validate(self):
        "Validate the covariance definition (squareness and positivity)"
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # SPD check by attempting a Cholesky factorisation; performed only on
        # request or in verbose (debug) logging mode.
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
        # NOTE(review): the "try:" and "except ...:" lines around this pair
        # are elided in this excerpt; L is computed only for its side effect.
            L = numpy.linalg.cholesky( self.__C )
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
        # NOTE(review): "try:"/"except ...:" elided here as well.
            L = self.__C.cholesky()
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
    # (fragments) isscalar()/isvector()/ismatrix()/isobject(): the four
    # "def ...(self):" lines are elided in this excerpt; each predicate just
    # returns the corresponding internal storage-kind flag set at init time.
        "Internal type check"
        return self.__is_scalar
        "Internal type check"
        return self.__is_vector
        "Internal type check"
        return self.__is_matrix
        "Internal type check"
        return self.__is_object
    # (fragment) getI(): inverse covariance; the "def getI(self):" line and
    # the leading "if self.ismatrix():" guard are elided in this excerpt.
            return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        else:
            return None # Deliberate explicit fallback ("Indispensable")
    # (fragment) getT(): transpose; the "def getT(self):" line and the
    # leading "if self.ismatrix():" guard are elided in this excerpt.
            return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
    # (fragment) cholesky(): the "def cholesky(self):" line and the leading
    # "if self.ismatrix():" guard are elided in this excerpt.
        "Cholesky decomposition"
            return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        # NOTE(review): the leading "if self.ismatrix():" guard is elided.
            return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" guard is elided.
            return numpy.diag(self.__C)
        elif self.isvector():
        # NOTE(review): the vector-case return is elided here.
        elif self.isscalar():
            # Scalar storage has no intrinsic size: the caller must say how
            # long the diagonal is (the "if msize is None:"/"else:" lines
            # around the raise/return pair are elided).
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()

    def asfullmatrix(self, msize=None):
        # Dense matrix view of the covariance.
        # NOTE(review): the docstring and the ismatrix() branch are elided.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # NOTE(review): "if msize is None:"/"else:" lines elided.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()

    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): the leading "if self.ismatrix():" guard is elided.
            return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # NOTE(review): "if msize is None:"/"else:" lines elided.
                raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
                return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()

    # (fragment) __repr__: the "def __repr__(self):" line is elided.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)
    # (fragment) __str__: the "def __str__(self):" line is elided.
        "x.__str__() <==> str(x)"
        return str(self.__C)
1562 def __add__(self, other):
1563 "x.__add__(y) <==> x+y"
1564 if self.ismatrix() or self.isobject():
1565 return self.__C + numpy.asmatrix(other)
1566 elif self.isvector() or self.isscalar():
1567 _A = numpy.asarray(other)
1568 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1569 return numpy.asmatrix(_A)
1571 def __radd__(self, other):
1572 "x.__radd__(y) <==> y+x"
1573 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1575 def __sub__(self, other):
1576 "x.__sub__(y) <==> x-y"
1577 if self.ismatrix() or self.isobject():
1578 return self.__C - numpy.asmatrix(other)
1579 elif self.isvector() or self.isscalar():
1580 _A = numpy.asarray(other)
1581 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1582 return numpy.asmatrix(_A)
1584 def __rsub__(self, other):
1585 "x.__rsub__(y) <==> y-x"
1586 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
    # (fragment) __neg__: the "def __neg__(self):" line and its return are
    # elided in this excerpt.
        "x.__neg__() <==> -x"

    def __mul__(self, other):
        "x.__mul__(y) <==> x*y"
        # Dispatch on the storage kind of self and the type/shape of other;
        # vectors are distinguished from matrices by their flattened size.
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return self.__C * numpy.asmatrix(other)
            # NOTE(review): the "else:" line is elided before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
        elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
            if numpy.ravel(other).size == self.shape[1]: # Vector
                return numpy.asmatrix(self.__C * numpy.ravel(other)).T
            elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
                return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
            # NOTE(review): "else:" elided before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return self.__C * other
        elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
            if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
                return self.__C * numpy.asmatrix(numpy.ravel(other)).T
            # NOTE(review): "else:" elided before this return.
                return self.__C * numpy.asmatrix(other)
        elif self.isobject():
            return self.__C.__mul__(other)
        # NOTE(review): "else:" elided before this raise.
            raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
    def __rmul__(self, other):
        "x.__rmul__(y) <==> y*x"
        # Left multiplication; mirrors __mul__ with the operand order swapped.
        if self.ismatrix() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isvector() and isinstance(other,numpy.matrix):
            if numpy.ravel(other).size == self.shape[0]: # Vector
                return numpy.asmatrix(numpy.ravel(other) * self.__C)
            elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
                return numpy.asmatrix(numpy.array(other) * self.__C)
            # NOTE(review): the "else:" line is elided before this raise.
                raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
        elif self.isscalar() and isinstance(other,numpy.matrix):
            return other * self.__C
        elif self.isobject():
            return self.__C.__rmul__(other)
        # NOTE(review): "else:" elided before this raise.
            raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
    # (fragment) __len__: the "def __len__(self):" line is elided; the length
    # of a covariance is its leading dimension.
        "x.__len__() <==> len(x)"
        return self.shape[0]
1644 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function from its source text.
    """
    def __init__(self, corps=""):
        # corps: the observer's Python source text, executed later by func().
        self.__corps = corps
    def func(self,var,info):
        "Observation function"
        # NOTE(review): the body is elided in this excerpt -- presumably an
        # exec of self.__corps; confirm against the complete file.
    # (fragment) getfunc(): its "def getfunc(self):" line and return are
    # elided in this excerpt.
        "Return the function pointer held in the object"
1658 # ==============================================================================
class CaseLogger(object):
    """
    Keep a record of the commands used to create a case.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name = str(__name)
        self.__objname = str(__objname)
        self.__logSerie = []          # ordered list of registered commands
        self.__switchoff = False      # when True, registration is suspended
        # NOTE(review): the "self.__viewers = {" opening line is elided here.
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
        # NOTE(review): the "self.__loaders = {" opening line is elided here.
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
        # User-supplied formatters extend the built-in registries.
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Registration of an individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # Never serialise the bound "self" argument.
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # NOTE(review): the "if __switchoff:" guard is elided here.
                self.__switchoff = True
        # NOTE(review): the "if not __switchoff:" guard is elided here.
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalised dump of the registered commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        # NOTE(review): "else:" elided before this raise.
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalised loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        # NOTE(review): "else:" elided before this raise.
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1708 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Simulation of Hm(x) already computed
                   _arg = None,  # Extra arguments for Hm, given as a tuple
                   _BI  = None,  # Inverse of the background error covariance
                   _RI  = None,  # Inverse of the observation error covariance
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be phased out for 8.0
                   _SSC = None,  # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = None,  # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the SSC
                   ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression.

    Returns J (float), or _HX for the "QR" quality measure, or the storage
    dictionary _SSV when _fRt is True. Raises ValueError when the operator,
    a required covariance, or the quality measure is not properly defined.
    """
    # Fresh defaults on every call: the previous mutable defaults ([] / {})
    # were shared between calls relying on them, silently accumulating state.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Make sure every potential storage series exists and supports append
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Simulation: reuse it when already available, otherwise apply the operator
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates as NaN instead of raising
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb = 0.
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb = 0.
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb = 0.
            Jo = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    # Book-keeping of the optimum over the evaluations stored so far
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1823 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-check banner printed when the module is run directly
    print('\n AUTODIAGNOSTIC \n')