1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
# NOTE(review): this chunk is a sampled, line-numbered listing of a larger
# module; the leading integers are embedded in the text and many original
# lines are missing, so the code below is NOT runnable as shown. Comments
# document only what the visible lines establish; everything else is hedged.
#
# CacheManager (visible intent): cache of previously computed operator
# evaluations, stored in self.__listOPCV as tuples of
# (flattened input point, result, norm of the point), with a relative
# tolerance (__tolerBP) used to detect duplicate input points.
42 class CacheManager(object):
44     Classe générale de gestion d'un cache de calculs
# Constructor keyword arguments (visible): toleranceInRedundancy = relative
# tolerance for duplicate detection; lenghtOfRedundancy = maximum cache
# length, -1 presumably meaning "auto-size on first store" (see
# storeValueInX below). "lenght" is a historical misspelling that is part
# of the public interface and must not be renamed here.
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 Les caractéristiques de tolérance peuvent être modifées à la création.
# (translation: "the tolerance characteristics can be changed at creation")
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
# __initlnOR remembers the initial bound so it can be restored later
# (see the two trailing assignments at the end of this block).
55 self.__initlnOR = self.__lenghtOR
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# wasCalculatedIn: scan the cache backwards (most recent entry first) for a
# point equal to xValue within the relative tolerance. The lines that
# return the (found, cached-result) pair are missing from this listing --
# TODO confirm return convention against the full source.
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
# Iterate over at most the __lenghtOR most recent entries, newest first.
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# A size mismatch means it cannot be the same point; the skip/continue
# line that follows is missing from this listing.
68 if xValue.size != self.__listOPCV[i][0].size:
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
# Duplicate test: Euclidean distance below tolerance scaled by the stored
# norm of the cached point (entry index 2 of the tuple).
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# storeValueInX: append (copy of ravel(xValue), HxValue, norm(xValue)).
# When the length bound is negative it is auto-sized to 2*size+2, and the
# oldest entries are evicted FIFO via pop(0). NOTE(review): pop(0) on a
# list is O(n); presumably acceptable for the cache sizes involved.
78 def storeValueInX(self, xValue, HxValue ):
79 "Stocke un calcul correspondant à la valeur"
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
# NOTE(review): the two assignments below presumably belong to separate
# disable()/enable() methods whose "def" lines are missing from this
# listing -- confirm against the full source before editing.
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
# Operator (visible intent): wraps either a Python (multi-)function or a
# numpy.matrix as a generic operator, counts evaluations locally and
# globally (Operator.NbCalls* class attributes, defined on lines missing
# from this listing), and optionally uses a class-level cache Operator.CM
# to avoid redundant evaluations. This listing is incomplete; comments
# below are limited to what the visible lines show.
102 class Operator(object):
104     Classe générale d'interface de type opérateur simple
# Constructor: exactly one of fromMethod / fromMatrix is expected.
# - fromMethod: Python function (wrapped via MultiFonction when not already
#   a multi-function),
# - fromMatrix: anything acceptable to the numpy.matrix constructor,
# - avoidingRedundancy: enable the evaluation cache,
# - inputAsMultiFunction: whether fromMethod already takes a series.
111 def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True, inputAsMultiFunction = False):
113 On construit un objet de ce type en fournissant, à l'aide de l'un des
114 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
117 - fromMethod : argument de type fonction Python
118 - fromMatrix : argument adapté au constructeur numpy.matrix
119 - avoidingRedundancy : évite ou pas les calculs redondants
120 - inputAsMultiFunction : fonction explicitement définie ou pas en multi-fonction
# Per-instance evaluation counters (matrix form, method form, cache hits).
122 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
123 self.__AvoidRC = bool( avoidingRedundancy )
124 self.__inputAsMF = bool( inputAsMultiFunction )
125 if fromMethod is not None and self.__inputAsMF:
126 self.__Method = fromMethod # logtimer(fromMethod)
128 self.__Type = "Method"
# Mono-function: adapt it to the multi-function calling convention.
# MultiFonction is defined elsewhere in the full module (not visible here).
129 elif fromMethod is not None and not self.__inputAsMF:
130 self.__Method = partial( MultiFonction, _sFunction=fromMethod)
132 self.__Type = "Method"
133 elif fromMatrix is not None:
# NOTE(review): numpy.float is a deprecated alias removed in NumPy >= 1.24;
# this line fails on modern NumPy and would need `float` instead.
135 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
136 self.__Type = "Matrix"
# Cache control: delegates to the class-level CacheManager Operator.CM
# (its definition line is missing from this listing).
142 def disableAvoidingRedundancy(self):
144 Operator.CM.disable()
146 def enableAvoidingRedundancy(self):
# NOTE(review): enable...() calling disable() looks wrong; presumably this
# is the else-branch of a conditional whose lines (147-150) are missing
# from this listing -- confirm against the full source.
151 Operator.CM.disable()
# appliedTo: apply the operator to xValue (or a series of values when
# argsAsSerie is True). If HValue is provided, the results are taken from
# it directly instead of being computed; otherwise cached results are
# reused when available, matrix/method evaluation is done for the rest.
157 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
159 Permet de restituer le résultat de l'application de l'opérateur à un
160 argument xValue. Cette méthode se contente d'appliquer, son argument
161 devant a priori être du bon type.
163 - xValue : argument adapté pour appliquer l'opérateur
170 if HValue is not None:
174 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
# Pre-supplied results path: store each given H(x) as a column matrix and
# feed the cache so later calls can reuse them.
176 if _HValue is not None:
177 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
179 for i in range(len(_HValue)):
180 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
182 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
# Computation path: look up the cache per input (when enabled), count a
# cache hit, a matrix call, or defer to the multi-function on _xserie.
187 for i, xv in enumerate(_xValue):
189 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
191 __alreadyCalculated = False
193 if __alreadyCalculated:
194 self.__addOneCacheCall()
197 if self.__Matrix is not None:
198 self.__addOneMatrixCall()
199 _hv = self.__Matrix * xv
201 self.__addOneMethodCall()
205 HxValue.append( _hv )
# Multi-function evaluation of all non-cached, non-matrix inputs at once.
207 if len(_xserie)>0 and self.__Matrix is None:
208 _hserie = self.__Method( _xserie ) # Calcul MF
209 if not hasattr(_hserie, "pop"):
210 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
216 Operator.CM.storeValueInX(_xv,_hv)
218 if argsAsSerie: return HxValue
219 else: return HxValue[-1]
# appliedControledFormTo: apply the operator to a (xValue, uValue) pair;
# when uValue is None the operator is applied to xValue alone.
221 def appliedControledFormTo(self, paire ):
223 Permet de restituer le résultat de l'application de l'opérateur à une
224 paire (xValue, uValue). Cette méthode se contente d'appliquer, son
225 argument devant a priori être du bon type. Si la uValue est None,
226 on suppose que l'opérateur ne s'applique qu'à xValue.
228 - xValue : argument X adapté pour appliquer l'opérateur
229 - uValue : argument U adapté pour appliquer l'opérateur
231 assert len(paire) == 2, "Incorrect number of arguments"
232 xValue, uValue = paire
233 if self.__Matrix is not None:
234 self.__addOneMatrixCall()
235 return self.__Matrix * xValue
236 elif uValue is not None:
237 self.__addOneMethodCall()
238 return self.__Method( (xValue, uValue) )
240 self.__addOneMethodCall()
241 return self.__Method( xValue )
# appliedInXTo: apply the operator linearized at xNominal to xValue; for a
# matrix operator the nominal point is irrelevant (linear case).
243 def appliedInXTo(self, paires, argsAsSerie = False ):
245 Permet de restituer le résultat de l'application de l'opérateur à un
246 argument xValue, sachant que l'opérateur est valable en xNominal.
247 Cette méthode se contente d'appliquer, son argument devant a priori
248 être du bon type. Si l'opérateur est linéaire car c'est une matrice,
249 alors il est valable en tout point nominal et il n'est pas nécessaire
251 Arguments : une liste contenant
252 - xNominal : argument permettant de donner le point où l'opérateur
253 est construit pour etre ensuite appliqué
254 - xValue : argument adapté pour appliquer l'opérateur
256 if argsAsSerie: _nxValue = paires
257 else: _nxValue = (paires,)
258 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
260 if self.__Matrix is not None:
262 for paire in _nxValue:
263 _xNominal, _xValue = paire
264 self.__addOneMatrixCall()
265 HxValue.append( self.__Matrix * _xValue )
# NOTE(review): __addOneMethodCall is called here with an argument, but the
# visible definition below takes none; either a different overload exists
# on the missing lines or this is a latent TypeError -- confirm.
267 self.__addOneMethodCall( len(_nxValue) )
268 HxValue = self.__Method( _nxValue ) # Calcul MF
270 if argsAsSerie: return HxValue
271 else: return HxValue[-1]
# asMatrix: return the operator as a matrix, either directly or by
# evaluating the method form at the given operating point.
273 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
275 Permet de renvoyer l'opérateur sous la forme d'une matrice
277 if self.__Matrix is not None:
278 self.__addOneMatrixCall()
# NOTE(review): `is not` performs an identity comparison against a string
# literal (sentinel); it works only by CPython interning accident and
# raises SyntaxWarning on Python >= 3.8. It should be `!=`, or better a
# private object() sentinel, since None is a legitimate value here.
280 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
281 self.__addOneMethodCall()
282 return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
284 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
# shape accessor (its "def" line is missing from this listing): returns the
# numpy shape when the matrix form is available, else raises.
288 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
289 la forme d'une matrice
291 if self.__Matrix is not None:
292 return self.__Matrix.shape
294 raise ValueError("Matrix form of the operator is not available, nor the shape")
# nbcalls: return the evaluation counters, local (this instance) then
# global (Operator class attributes); `which` selects a single entry.
296 def nbcalls(self, which=None):
298 Renvoie les nombres d'évaluations de l'opérateur
301 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
302 self.__NbCallsAsMatrix,
303 self.__NbCallsAsMethod,
304 self.__NbCallsOfCached,
305 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
306 Operator.NbCallsAsMatrix,
307 Operator.NbCallsAsMethod,
308 Operator.NbCallsOfCached,
310 if which is None: return __nbcalls
311 else: return __nbcalls[which]
# Private counters: each bumps the per-instance counter and the class-wide
# (all instances) counter of the same kind.
313 def __addOneMatrixCall(self):
314 "Comptabilise un appel"
315 self.__NbCallsAsMatrix += 1 # Decompte local
316 Operator.NbCallsAsMatrix += 1 # Decompte global
318 def __addOneMethodCall(self):
319 "Comptabilise un appel"
320 self.__NbCallsAsMethod += 1 # Decompte local
321 Operator.NbCallsAsMethod += 1 # Decompte global
323 def __addOneCacheCall(self):
324 "Comptabilise un appel"
325 self.__NbCallsOfCached += 1 # Decompte local
326 Operator.NbCallsOfCached += 1 # Decompte global
328 # ==============================================================================
# FullOperator (visible intent): bundles the three forms of an operator
# (Direct, Tangent, Adjoint) into self.__FO, building them from one of:
# a matrix, a single Direct function (with approximated derivatives via
# FDApproximation), a Direct/Tangent/Adjoint triplet, or a user script.
# This listing is incomplete (many constructor lines are missing).
329 class FullOperator(object):
331 Classe générale d'interface de type opérateur complet
332 (Direct, Linéaire Tangent, Adjoint)
# Constructor keyword arguments (partial view): name, the as* inputs being
# mutually-alternative ways of defining the operator, and inputAsMF
# flagging that functions are already multi-functions.
335 name = "GenericFullOperator",
337 asOneFunction = None, # Fonction
338 asThreeFunctions = None, # Fonctions dictionary
339 asScript = None, # Fonction(s) script
340 asDict = None, # Parameters
343 inputAsMF = False,# Fonction(s) as Multi-Functions
348 self.__name = str(name)
349 self.__check = bool(toBeChecked)
# User-facing parameter names are remapped onto the internal "with*" keys
# consumed later by FDApproximation.
354 if (asDict is not None) and isinstance(asDict, dict):
355 __Parameters.update( asDict )
356 if "DifferentialIncrement" in asDict:
357 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
358 if "CenteredFiniteDifference" in asDict:
359 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
360 if "EnableMultiProcessing" in asDict:
361 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
362 if "NumberOfProcesses" in asDict:
363 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
# Script-based definition: the operator(s) are imported from a user script,
# either as a matrix, one Direct function, or a function triplet (the
# branch conditions between these lines are missing from this listing).
365 if asScript is not None:
366 __Matrix, __Function = None, None
368 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
370 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
371 __Function.update({"useApproximatedDerivatives":True})
372 __Function.update(__Parameters)
373 elif asThreeFunctions:
375 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
376 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
377 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
379 __Function.update(__Parameters)
# Direct-function definition: accepted either bare or as {"Direct": f};
# derivatives will then be approximated.
382 if asOneFunction is not None:
383 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
384 if asOneFunction["Direct"] is not None:
385 __Function = asOneFunction
# NOTE(review): "dictionnary"/"have" in this user-facing message are
# misspelled; left untouched here since runtime strings must not change
# in a documentation-only pass.
387 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
389 __Function = { "Direct":asOneFunction }
390 __Function.update({"useApproximatedDerivatives":True})
391 __Function.update(__Parameters)
# Triplet definition: a true Tangent/Adjoint pair (without approximated
# derivatives requested) is used as-is; a Direct-only dict falls back to
# approximated derivatives; anything else is rejected.
392 elif asThreeFunctions is not None:
393 if isinstance(asThreeFunctions, dict) and \
394 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
395 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
396 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
397 __Function = asThreeFunctions
398 elif isinstance(asThreeFunctions, dict) and \
399 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
400 __Function = asThreeFunctions
401 __Function.update({"useApproximatedDerivatives":True})
403 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Missing Direct defaults to the Tangent function (valid for a linear
# operator).
404 if "Direct" not in asThreeFunctions:
405 __Function["Direct"] = asThreeFunctions["Tangent"]
406 __Function.update(__Parameters)
410 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
411 # for k in ("Direct", "Tangent", "Adjoint"):
412 # if k in __Function and hasattr(__Function[k],"__class__"):
413 # if type(__Function[k]) is type(self.__init__):
414 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalize the operator-value-at-background input to a dict keyed "HXb".
416 if appliedInX is not None and isinstance(appliedInX, dict):
417 __appliedInX = appliedInX
418 elif appliedInX is not None:
419 __appliedInX = {"HXb":appliedInX}
423 if scheduledBy is not None:
424 self.__T = scheduledBy
# Build path 1: approximated derivatives around a single Direct function.
# All "with*" knobs get defaults before FDApproximation is constructed.
426 if isinstance(__Function, dict) and \
427 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
428 ("Direct" in __Function) and (__Function["Direct"] is not None):
429 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
430 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
431 if "withdX" not in __Function: __Function["withdX"] = None
432 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
433 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
434 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
435 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
436 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
437 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
# Deferred import: daNumerics is a sibling project package, loaded only
# when approximated derivatives are actually requested.
438 from daNumerics.ApproximatedDerivatives import FDApproximation
439 FDA = FDApproximation(
440 Function = __Function["Direct"],
441 centeredDF = __Function["withCenteredDF"],
442 increment = __Function["withIncrement"],
443 dX = __Function["withdX"],
444 avoidingRedundancy = __Function["withAvoidingRedundancy"],
445 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
446 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
447 mpEnabled = __Function["withmpEnabled"],
448 mpWorkers = __Function["withmpWorkers"],
449 mfEnabled = __Function["withmfEnabled"],
451 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF)
452 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
453 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
# Build path 2: explicit Direct/Tangent/Adjoint triplet.
454 elif isinstance(__Function, dict) and \
455 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
456 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
457 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
458 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
459 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
# Build path 3: matrix operator; the Adjoint is simply the transpose.
# NOTE(review): numpy.float is removed in NumPy >= 1.24 (here and below).
460 elif asMatrix is not None:
461 __matrice = numpy.matrix( __Matrix, numpy.float )
462 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
463 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
464 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
467 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Normalize each stored "applied in X" value to a column matrix, whatever
# its input representation (matrix, 2-D array, or plain sequence).
469 if __appliedInX is not None:
470 self.__FO["AppliedInX"] = {}
471 for key in list(__appliedInX.keys()):
# NOTE(review): type(...) is type(...) checks would be clearer and safer
# as isinstance(...); kept unchanged in this documentation-only pass.
472 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
473 # Pour le cas où l'on a une vraie matrice
474 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
475 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
476 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
477 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
479 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
481 self.__FO["AppliedInX"] = None
# NOTE(review): __repr__/__str__ reference self.__V, which is not assigned
# anywhere in the visible lines -- presumably set on missing lines, or a
# copy/paste remnant from a sibling class; confirm against the full source.
487 "x.__repr__() <==> repr(x)"
488 return repr(self.__V)
491 "x.__str__() <==> str(x)"
494 # ==============================================================================
# Algorithm (visible intent): base class for elementary assimilation
# algorithms. It provides a dictionary of named Persistence containers
# (self.StoredVariables), parameter declaration/validation helpers, and
# _pre_run/_post_run bookkeeping. Subclasses implement run(). This listing
# is incomplete (many lines are missing); comments are hedged accordingly.
495 class Algorithm(object):
497 Classe générale d'interface de type algorithme
499 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
500 d'assimilation, en fournissant un container (dictionnaire) de variables
501 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
503 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# __init__: create every generic storage variable as an empty Persistence
# container; subclasses may add their own entries.
505 def __init__(self, name):
507 L'initialisation présente permet de fabriquer des variables de stockage
508 disponibles de manière générique dans les algorithmes élémentaires. Ces
509 variables de stockage sont ensuite conservées dans un dictionnaire
510 interne à l'objet, mais auquel on accède par la méthode "get".
512 Les variables prévues sont :
513 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
514 - CostFunctionJb : partie ébauche ou background de la fonction-cout
515 - CostFunctionJo : partie observations de la fonction-cout
516 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
517 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
518 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
519 - CurrentState : état courant lors d'itérations
520 - Analysis : l'analyse Xa
521 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
522 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
523 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
524 - Innovation : l'innovation : d = Y - H(X)
525 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
526 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
527 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
528 - MahalanobisConsistency : indicateur de consistance des covariances
529 - OMA : Observation moins Analysis : Y - Xa
530 - OMB : Observation moins Background : Y - Xb
531 - AMB : Analysis moins Background : Xa - Xb
532 - APosterioriCovariance : matrice A
533 - APosterioriVariances : variances de la matrice A
534 - APosterioriStandardDeviations : écart-types de la matrice A
535 - APosterioriCorrelations : correlations de la matrice A
536 - Residu : dans le cas des algorithmes de vérification
537 On peut rajouter des variables à stocker dans l'initialisation de
538 l'algorithme élémentaire qui va hériter de cette classe
540 logging.debug("%s Initialisation", str(name))
# Memory-usage probe, reused by _pre_run/_post_run debug logging.
541 self._m = PlatformInfo.SystemUsage()
543 self._name = str( name )
544 self._parameters = {"StoreSupplementaryCalculations":[]}
545 self.__required_parameters = {}
546 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
# One Persistence container per storable quantity; OneScalar/OneVector/
# OneMatrix/OneIndex come from daCore.Persistence (imported at file top).
548 self.StoredVariables = {}
549 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
550 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
551 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
552 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
553 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
554 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
555 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
556 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
557 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
558 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
559 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
560 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
561 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
562 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
563 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
564 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
565 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
566 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
567 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
568 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
569 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
570 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
571 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
572 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
573 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
# NOTE(review): the class docstring above lists "AMB" but the stored key
# is "BMA"; one of the two is stale -- confirm against the full source.
574 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
575 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
576 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
577 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
578 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
579 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
580 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
# _pre_run: merge user Parameters, validate presence/size of the input
# vectors and covariance matrices, normalize "Bounds", and set the
# optimizer verbosity knobs from the logging level.
582 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
584 logging.debug("%s Lancement", self._name)
585 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
587 # Mise a jour de self._parameters avec Parameters
588 self.__setParameters(Parameters)
590 # Corrections et complements
# Local helper: reject a missing mandatory vector, otherwise just log its
# status (the "if argument is None" guard line is missing from listing).
591 def __test_vvalue(argument, variable, argname):
593 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
594 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
595 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
596 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
598 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
600 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
602 __test_vvalue( Xb, "Xb", "Background or initial state" )
603 __test_vvalue( Y, "Y", "Observation" )
# Same check for the covariance matrices R/B/Q.
605 def __test_cvalue(argument, variable, argname):
607 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
608 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
609 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
610 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
612 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
614 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
616 __test_cvalue( R, "R", "Observation" )
617 __test_cvalue( B, "B", "Background" )
618 __test_cvalue( Q, "Q", "Evolution" )
# Bounds must be a non-empty list/tuple to be honored; else reset to None.
620 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
621 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
623 self._parameters["Bounds"] = None
# Verbose optimizers below WARNING level; quiet otherwise. The tnc message
# constants require SciPy (guarded by PlatformInfo.has_scipy).
625 if logging.getLogger().level < logging.WARNING:
626 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
627 if PlatformInfo.has_scipy:
628 import scipy.optimize
629 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
631 self._parameters["optmessages"] = 15
633 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
634 if PlatformInfo.has_scipy:
635 import scipy.optimize
636 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
638 self._parameters["optmessages"] = 15
# _post_run: derive requested a-posteriori quantities (variances, standard
# deviations, correlations) from each stored covariance, then log the
# operator call counters and memory usage.
642 def _post_run(self,_oH=None):
644 if ("StoreSupplementaryCalculations" in self._parameters) and \
645 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
646 for _A in self.StoredVariables["APosterioriCovariance"]:
647 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
648 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
649 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
650 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
651 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
# Correlation matrix C = D^-1/2 A D^-1/2 with D = diag(A).
652 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
653 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
654 self.StoredVariables["APosterioriCorrelations"].store( _C )
656 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
657 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
658 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
659 logging.debug("%s Terminé", self._name)
662 def _toStore(self, key):
663 "True if in StoreSupplementaryCalculations, else False"
664 return key in self._parameters["StoreSupplementaryCalculations"]
# get: one stored variable by key, or the whole dict when key is None.
666 def get(self, key=None):
668 Renvoie l'une des variables stockées identifiée par la clé, ou le
669 dictionnaire de l'ensemble des variables disponibles en l'absence de
670 clé. Ce sont directement les variables sous forme objet qui sont
671 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
672 des classes de persistance.
675 return self.StoredVariables[key]
677 return self.StoredVariables
679 def __contains__(self, key=None):
680 "D.__contains__(k) -> True if D has a key k, else False"
681 return key in self.StoredVariables
# keys/pop delegate to StoredVariables (dict-like facade); the hasattr
# guard presumably protects against use before __init__ completes.
684 "D.keys() -> list of D's keys"
685 if hasattr(self, "StoredVariables"):
686 return self.StoredVariables.keys()
691 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
692 if hasattr(self, "StoredVariables"):
693 return self.StoredVariables.pop(k, d)
698 raise TypeError("pop expected at least 1 arguments, got 0")
699 "If key is not found, d is returned if given, otherwise KeyError is raised"
# run: abstract; every concrete algorithm overrides this.
705 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
707 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
708 sa forme mathématique la plus naturelle possible.
710 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# defineRequiredParameter: declare a parameter with default/typecast/bounds
# metadata, then immediately log its validated default.
712 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
714 Permet de définir dans l'algorithme des paramètres requis et leurs
715 caractéristiques par défaut.
718 raise ValueError("A name is mandatory to define a required parameter.")
720 self.__required_parameters[name] = {
722 "typecast" : typecast,
728 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
730 def getRequiredParameters(self, noDetails=True):
732 Renvoie la liste des noms de paramètres requis ou directement le
733 dictionnaire des paramètres requis.
736 return sorted(self.__required_parameters.keys())
738 return self.__required_parameters
# setParameterValue: validate (typecast, min/max bounds, allowed list) and
# return the value of a declared parameter; defaults apply when value is
# None. NOTE(review): the docstring says "Renvoie" (returns) -- despite the
# "set" name the visible code only computes/validates __val.
740 def setParameterValue(self, name=None, value=None):
742 Renvoie la valeur d'un paramètre requis de manière contrôlée
744 default = self.__required_parameters[name]["default"]
745 typecast = self.__required_parameters[name]["typecast"]
746 minval = self.__required_parameters[name]["minval"]
747 maxval = self.__required_parameters[name]["maxval"]
748 listval = self.__required_parameters[name]["listval"]
750 if value is None and default is None:
752 elif value is None and default is not None:
753 if typecast is None: __val = default
754 else: __val = typecast( default )
756 if typecast is None: __val = value
757 else: __val = typecast( value )
759 if minval is not None and (numpy.array(__val, float) < minval).any():
760 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
761 if maxval is not None and (numpy.array(__val, float) > maxval).any():
762 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
763 if listval is not None:
764 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
767 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
768 elif __val not in listval:
769 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
# requireInputArguments: record which of Xb/Y/R/B/Q... are mandatory or
# optional for the concrete algorithm (consumed by _pre_run checks).
772 def requireInputArguments(self, mandatory=(), optional=()):
774 Permet d'imposer des arguments requises en entrée
776 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
777 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# __setParameters: merge the user dict, then (re)validate every declared
# parameter, falling back to its default when absent from fromDico.
# NOTE(review): mutable default argument fromDico={} is safe only because
# it is never mutated here; still a known Python pitfall.
779 def __setParameters(self, fromDico={}):
781 Permet de stocker les paramètres reçus dans le dictionnaire interne.
783 self._parameters.update( fromDico )
784 for k in self.__required_parameters.keys():
785 if k in fromDico.keys():
786 self._parameters[k] = self.setParameterValue(k,fromDico[k])
788 self._parameters[k] = self.setParameterValue(k)
789 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
791 # ==============================================================================
# NOTE(review): this excerpt omits many original lines (gaps in the embedded
# numbering: try/except headers, else branches, blank lines); all visible
# code is kept verbatim, only comments/docstrings are added or translated.
792 class AlgorithmAndParameters(object):
794 General action-interface class for the algorithm and its parameters
797 name = "GenericAlgorithm",
804 self.__name = str(name)
808 self.__algorithm = {}
809 self.__algorithmFile = None
810 self.__algorithmName = None
812 self.updateParameters( asDict, asScript )
# If no algorithm object is given directly, try to read it from the script.
814 if asAlgorithm is None and asScript is not None:
815 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
819 if __Algo is not None:
820 self.__A = str(__Algo)
821 self.__P.update( {"Algorithm":self.__A} )
823 self.__setAlgorithm( self.__A )
# Updates the internal parameter dictionary from a dict or a script file.
825 def updateParameters(self,
829 "Update of the parameters"
830 if asDict is None and asScript is not None:
831 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
835 if __Dict is not None:
836 self.__P.update( dict(__Dict) )
# Runs the assimilation computation with pure-Python inputs: unwraps each
# input object through its getO() accessor when available, validates the
# shapes, then calls the elementary algorithm's run() method.
838 def executePythonScheme(self, asDictAO = None):
839 "Runs the assimilation computation"
840 Operator.CM.clearCache()
842 if not isinstance(asDictAO, dict):
843 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
844 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
845 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
846 else: self.__Xb = None
847 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
848 else: self.__Y = asDictAO["Observation"]
849 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
850 else: self.__U = asDictAO["ControlInput"]
851 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
852 else: self.__HO = asDictAO["ObservationOperator"]
853 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
854 else: self.__EM = asDictAO["EvolutionModel"]
855 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
856 else: self.__CM = asDictAO["ControlModel"]
857 self.__B = asDictAO["BackgroundError"]
858 self.__R = asDictAO["ObservationError"]
859 self.__Q = asDictAO["EvolutionError"]
861 self.__shape_validate()
863 self.__algorithm.run(
873 Parameters = self.__P,
# Runs the assimilation computation through a YACS XML schema: checks the
# SALOME/YACS/ADAO environment, loads and validates the schema, then
# executes it with the YACS executor.
877 def executeYACSScheme(self, FileName=None):
878 "Runs the assimilation computation"
879 if FileName is None or not os.path.exists(FileName):
880 raise ValueError("a YACS file name has to be given for YACS execution.\n")
882 __file = os.path.abspath(FileName)
883 logging.debug("The YACS file name is \"%s\"."%__file)
884 if not PlatformInfo.has_salome or \
885 not PlatformInfo.has_yacs or \
886 not PlatformInfo.has_adao:
887 raise ImportError("\n\n"+\
888 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
889 "Please load the right environnement before trying to use it.\n")
894 SALOMERuntime.RuntimeSALOME_setRuntime()
896 r = pilot.getRuntime()
897 xmlLoader = loader.YACSLoader()
898 xmlLoader.registerProcCataLoader()
900 catalogAd = r.loadCatalog("proc", __file)
901 r.addCatalog(catalogAd)
906 p = xmlLoader.load(__file)
907 except IOError as ex:
908 print("The YACS XML schema file can not be loaded: %s"%(ex,))
910 logger = p.getLogger("parser")
911 if not logger.isEmpty():
912 print("The imported YACS XML schema has errors on parsing:")
913 print(logger.getStr())
916 print("The YACS XML schema is not valid and will not be executed:")
917 print(p.getErrorReport())
919 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
920 p.checkConsistency(info)
921 if info.areWarningsOrErrors():
922 print("The YACS XML schema is not coherent and will not be executed:")
923 print(info.getGlobalRepr())
925 e = pilot.ExecutorSwig()
927 if p.getEffectiveState() != pilot.DONE:
928 print(p.getErrorReport())
# Looks up a key first among the algorithm's stored variables, then among
# the parameters.
932 def get(self, key = None):
933 "Checks the existence of a variable or parameter key"
934 if key in self.__algorithm:
935 return self.__algorithm.get( key )
936 elif key in self.__P:
942 "Required for pickling"
943 return self.__algorithm.pop(k, d)
945 def getAlgorithmRequiredParameters(self, noDetails=True):
946 "Returns the list of parameters required by the algorithm"
947 return self.__algorithm.getRequiredParameters(noDetails)
# Attaches an observer __O (with info __I, scheduling __S) to the stored
# variable named __V; an algorithm must have been chosen beforehand.
949 def setObserver(self, __V, __O, __I, __S):
950 if self.__algorithm is None \
951 or isinstance(self.__algorithm, dict) \
952 or not hasattr(self.__algorithm,"StoredVariables"):
953 raise ValueError("No observer can be build before choosing an algorithm.")
954 if __V not in self.__algorithm:
955 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
957 self.__algorithm.StoredVariables[ __V ].setDataObserver(
960 HookParameters = __I,
# Detaches observer __O from the stored variable named __V; __A presumably
# selects removing all observers — TODO confirm against Persistence API.
963 def removeObserver(self, __V, __O, __A = False):
964 if self.__algorithm is None \
965 or isinstance(self.__algorithm, dict) \
966 or not hasattr(self.__algorithm,"StoredVariables"):
967 raise ValueError("No observer can be removed before choosing an algorithm.")
968 if __V not in self.__algorithm:
969 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
971 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# Tells whether the stored variable named __V currently has an observer.
976 def hasObserver(self, __V):
977 if self.__algorithm is None \
978 or isinstance(self.__algorithm, dict) \
979 or not hasattr(self.__algorithm,"StoredVariables"):
981 if __V not in self.__algorithm:
983 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
986 return list(self.__algorithm.keys()) + list(self.__P.keys())
988 def __contains__(self, key=None):
989 "D.__contains__(k) -> True if D has a key k, else False"
990 return key in self.__algorithm or key in self.__P
993 "x.__repr__() <==> repr(x)"
994 return repr(self.__A)+", "+repr(self.__P)
997 "x.__str__() <==> str(x)"
998 return str(self.__A)+", "+str(self.__P)
# Locates the module "<choice>.py" under a "daAlgorithms" directory on
# sys.path, imports it, and instantiates its ElementaryAlgorithm class.
# The choice is write-once: changing it afterwards raises.
1000 def __setAlgorithm(self, choice = None ):
1002 Selects the algorithm to be used to carry out the assimilation
1003 study. The argument is a character string referring to the name
1004 of an algorithm performing the operation on the fixed arguments.
1007 raise ValueError("Error: algorithm choice has to be given")
1008 if self.__algorithmName is not None:
1009 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1010 daDirectory = "daAlgorithms"
1012 # Explicitly search for the complete file
1013 # ------------------------------------------
1015 for directory in sys.path:
1016 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1017 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1018 if module_path is None:
1019 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
1021 # Import the complete file as a module
1022 # ------------------------------------------
1024 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1025 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1026 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
1027 raise ImportError("this module does not define a valid elementary algorithm.")
1028 self.__algorithmName = str(choice)
1029 sys.path = sys_path_tmp ; del sys_path_tmp
1030 except ImportError as e:
1031 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1033 # Instantiate an object of the file's elementary type
1034 # -------------------------------------------------
1035 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
# Cross-checks the sizes of all inputs (Xb, Y, U, B, R, Q, HO, EM, CM):
# first derives a shape for each object, handling both tuple attributes and
# callable shape() methods, then enforces the compatibility conditions.
1038 def __shape_validate(self):
1040 Validates the correct correspondence of the sizes of the variables
1041 and of the matrices, if any.
1043 if self.__Xb is None: __Xb_shape = (0,)
1044 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1045 elif hasattr(self.__Xb,"shape"):
1046 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1047 else: __Xb_shape = self.__Xb.shape()
1048 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1050 if self.__Y is None: __Y_shape = (0,)
1051 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1052 elif hasattr(self.__Y,"shape"):
1053 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1054 else: __Y_shape = self.__Y.shape()
1055 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1057 if self.__U is None: __U_shape = (0,)
1058 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1059 elif hasattr(self.__U,"shape"):
1060 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1061 else: __U_shape = self.__U.shape()
1062 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1064 if self.__B is None: __B_shape = (0,0)
1065 elif hasattr(self.__B,"shape"):
1066 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1067 else: __B_shape = self.__B.shape()
1068 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1070 if self.__R is None: __R_shape = (0,0)
1071 elif hasattr(self.__R,"shape"):
1072 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1073 else: __R_shape = self.__R.shape()
1074 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1076 if self.__Q is None: __Q_shape = (0,0)
1077 elif hasattr(self.__Q,"shape"):
1078 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1079 else: __Q_shape = self.__Q.shape()
1080 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
1082 if len(self.__HO) == 0: __HO_shape = (0,0)
1083 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1084 elif hasattr(self.__HO["Direct"],"shape"):
1085 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1086 else: __HO_shape = self.__HO["Direct"].shape()
1087 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1089 if len(self.__EM) == 0: __EM_shape = (0,0)
1090 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1091 elif hasattr(self.__EM["Direct"],"shape"):
1092 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1093 else: __EM_shape = self.__EM["Direct"].shape()
1094 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1096 if len(self.__CM) == 0: __CM_shape = (0,0)
1097 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1098 elif hasattr(self.__CM["Direct"],"shape"):
1099 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1100 else: __CM_shape = self.__CM["Direct"].shape()
1101 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1103 # Check the conditions
1104 # ---------------------------
1105 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1106 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1107 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1108 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
1110 if not( min(__B_shape) == max(__B_shape) ):
1111 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1112 if not( min(__R_shape) == max(__R_shape) ):
1113 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1114 if not( min(__Q_shape) == max(__Q_shape) ):
1115 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1116 if not( min(__EM_shape) == max(__EM_shape) ):
1117 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
1119 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1120 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1121 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1122 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1123 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1124 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1125 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1126 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# Special case: for EnsembleBlue a B/Xb size mismatch is resolved by
# reshaping Xb into a persistent series of ensemble members.
1128 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1129 if self.__algorithmName in ["EnsembleBlue",]:
1130 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1131 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1132 for member in asPersistentVector:
1133 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1134 __Xb_shape = min(__B_shape)
1136 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1138 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1139 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1141 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1142 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1144 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1145 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
1147 if ("Bounds" in self.__P) \
1148 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1149 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1150 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1151 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1155 # ==============================================================================
# Holds a regulation algorithm name and its parameter dictionary, both of
# which can be supplied directly or read from a script file.
# NOTE(review): this excerpt omits several original lines (gaps in the
# embedded numbering); code kept verbatim.
1156 class RegulationAndParameters(object):
1158 General action-interface class for the regulation and its parameters
1161 name = "GenericRegulation",
1168 self.__name = str(name)
1171 if asAlgorithm is None and asScript is not None:
1172 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
1174 __Algo = asAlgorithm
1176 if asDict is None and asScript is not None:
1177 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
1181 if __Dict is not None:
1182 self.__P.update( dict(__Dict) )
1184 if __Algo is not None:
1185 self.__P.update( {"Algorithm":self.__A} )
# Direct lookup in the parameter dictionary.
1187 def get(self, key = None):
1188 "Checks the existence of a variable or parameter key"
1190 return self.__P[key]
1194 # ==============================================================================
# Builds an observer from a variable name (or list of names), an optional
# info string, and a function given as text, template name, script file or
# direct callable object, then registers it on the algorithm's variables.
# NOTE(review): this excerpt omits several original lines (gaps in the
# embedded numbering); code kept verbatim.
1195 class DataObserver(object):
1197 General observer-type interface class
1200 name = "GenericObserver",
1212 self.__name = str(name)
# Normalize the observed variable name(s) into the tuple self.__V, with a
# matching tuple of info strings self.__I.
1217 if onVariable is None:
1218 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1219 elif type(onVariable) in (tuple, list):
1220 self.__V = tuple(map( str, onVariable ))
1221 if withInfo is None:
1224 self.__I = (str(withInfo),)*len(self.__V)
1225 elif isinstance(onVariable, str):
1226 self.__V = (onVariable,)
1227 if withInfo is None:
1228 self.__I = (onVariable,)
1230 self.__I = (str(withInfo),)
1232 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Resolve the observer function source: direct string, named template, or
# script file; asObsObject takes precedence as a ready-made callable.
1234 if asString is not None:
1235 __FunctionText = asString
1236 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1237 __FunctionText = Templates.ObserverTemplates[asTemplate]
1238 elif asScript is not None:
1239 __FunctionText = ImportFromScript(asScript).getstring()
1242 __Function = ObserverF(__FunctionText)
1244 if asObsObject is not None:
1245 self.__O = asObsObject
1247 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1249 for k in range(len(self.__V)):
1252 if ename not in withAlgo:
1253 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1255 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1258 "x.__repr__() <==> repr(x)"
1259 return repr(self.__V)+"\n"+repr(self.__O)
1262 "x.__str__() <==> str(x)"
1263 return str(self.__V)+"\n"+str(self.__O)
1265 # ==============================================================================
# Wraps a state vector (single vector or persistent series of vectors) fed
# from a direct value, a script variable or a data file, exposing .shape,
# .size and type predicates.
# NOTE(review): this excerpt omits several original lines (gaps in the
# embedded numbering); code kept verbatim.
1266 class State(object):
1268 General state-type interface class
1271 name = "GenericVector",
1273 asPersistentVector = None,
1279 toBeChecked = False,
1282 Allows defining a vector:
1283 - asVector : data input, as a vector compatible with the
1284 numpy.matrix constructor, or "True" if input comes from a script.
1285 - asPersistentVector : data input, as a series of vectors
1286 compatible with the numpy.matrix constructor, or as an object of
1287 Persistence type, or "True" if input comes from a script.
1288 - asScript : if a valid script is given containing a variable
1289 named "name", the variable is of type "asVector" (by default) or
1290 "asPersistentVector" depending on which of these variables is set to
1292 - asDataFile : if one or more valid files are given containing
1293 values in columns, themselves named "colNames"
1294 (if no column name is indicated, a column named "name" is looked
1295 for), the columns are retrieved and stored row after
1296 row (colMajor=False) or column after column (colMajor=True). The
1297 resulting variable is of type "asVector" (by default) or
1298 "asPersistentVector" depending on which of these variables is set to
1301 self.__name = str(name)
1302 self.__check = bool(toBeChecked)
1306 self.__is_vector = False
1307 self.__is_series = False
# Resolve the data source: script variable, data file, or direct values.
1309 if asScript is not None:
1310 __Vector, __Series = None, None
1311 if asPersistentVector:
1312 __Series = ImportFromScript(asScript).getvalue( self.__name )
1314 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1315 elif asDataFile is not None:
1316 __Vector, __Series = None, None
1317 if asPersistentVector:
1318 if colNames is not None:
1319 __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
1321 __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
# npz files already store data column-major, hence the inverted condition.
1322 if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1323 __Series = numpy.transpose(__Series)
1324 elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
1325 __Series = numpy.transpose(__Series)
1327 if colNames is not None:
1328 __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
1330 __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
1332 __Vector = numpy.ravel(__Vector, order = "F")
1334 __Vector = numpy.ravel(__Vector, order = "C")
1336 __Vector, __Series = asVector, asPersistentVector
# Store either a single column vector or a persistent series of them.
1338 if __Vector is not None:
1339 self.__is_vector = True
1340 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1341 self.shape = self.__V.shape
1342 self.size = self.__V.size
1343 elif __Series is not None:
1344 self.__is_series = True
1345 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1346 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# NOTE(review): eval() on a string series is unsafe on untrusted input.
1347 if isinstance(__Series, str): __Series = eval(__Series)
1348 for member in __Series:
1349 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1352 if isinstance(self.__V.shape, (tuple, list)):
1353 self.shape = self.__V.shape
1355 self.shape = self.__V.shape()
1356 if len(self.shape) == 1:
1357 self.shape = (self.shape[0],1)
1358 self.size = self.shape[0] * self.shape[1]
1360 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1362 if scheduledBy is not None:
1363 self.__T = scheduledBy
# Returns the stored value, optionally paired with its scheduler.
1365 def getO(self, withScheduler=False):
1367 return self.__V, self.__T
1368 elif self.__T is None:
1374 "Internal type check"
1375 return self.__is_vector
1378 "Internal type check"
1379 return self.__is_series
1382 "x.__repr__() <==> repr(x)"
1383 return repr(self.__V)
1386 "x.__str__() <==> str(x)"
1387 return str(self.__V)
1389 # ==============================================================================
1390 class Covariance(object):
1392 Classe générale d'interface de type covariance
1395 name = "GenericCovariance",
1396 asCovariance = None,
1397 asEyeByScalar = None,
1398 asEyeByVector = None,
1401 toBeChecked = False,
1404 Permet de définir une covariance :
1405 - asCovariance : entrée des données, comme une matrice compatible avec
1406 le constructeur de numpy.matrix
1407 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1408 multiplicatif d'une matrice de corrélation identité, aucune matrice
1409 n'étant donc explicitement à donner
1410 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1411 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1412 n'étant donc explicitement à donner
1413 - asCovObject : entrée des données comme un objet python, qui a les
1414 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1415 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1416 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1417 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1418 pleine doit être vérifié
1420 self.__name = str(name)
1421 self.__check = bool(toBeChecked)
1424 self.__is_scalar = False
1425 self.__is_vector = False
1426 self.__is_matrix = False
1427 self.__is_object = False
1429 if asScript is not None:
1430 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1432 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1434 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1436 __Object = ImportFromScript(asScript).getvalue( self.__name )
1438 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1440 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1442 if __Scalar is not None:
1443 if numpy.matrix(__Scalar).size != 1:
1444 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1445 self.__is_scalar = True
1446 self.__C = numpy.abs( float(__Scalar) )
1449 elif __Vector is not None:
1450 self.__is_vector = True
1451 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1452 self.shape = (self.__C.size,self.__C.size)
1453 self.size = self.__C.size**2
1454 elif __Matrix is not None:
1455 self.__is_matrix = True
1456 self.__C = numpy.matrix( __Matrix, float )
1457 self.shape = self.__C.shape
1458 self.size = self.__C.size
1459 elif __Object is not None:
1460 self.__is_object = True
1462 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1463 if not hasattr(self.__C,at):
1464 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1465 if hasattr(self.__C,"shape"):
1466 self.shape = self.__C.shape
1469 if hasattr(self.__C,"size"):
1470 self.size = self.__C.size
1475 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1479 def __validate(self):
1481 if self.__C is None:
1482 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1483 if self.ismatrix() and min(self.shape) != max(self.shape):
1484 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1485 if self.isobject() and min(self.shape) != max(self.shape):
1486 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1487 if self.isscalar() and self.__C <= 0:
1488 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1489 if self.isvector() and (self.__C <= 0).any():
1490 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1491 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1493 L = numpy.linalg.cholesky( self.__C )
1495 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1496 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1498 L = self.__C.cholesky()
1500 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1503 "Vérification du type interne"
1504 return self.__is_scalar
1507 "Vérification du type interne"
1508 return self.__is_vector
1511 "Vérification du type interne"
1512 return self.__is_matrix
1515 "Vérification du type interne"
1516 return self.__is_object
1521 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1522 elif self.isvector():
1523 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1524 elif self.isscalar():
1525 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1526 elif self.isobject():
1527 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1529 return None # Indispensable
1534 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1535 elif self.isvector():
1536 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1537 elif self.isscalar():
1538 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1539 elif self.isobject():
1540 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1543 "Décomposition de Cholesky"
1545 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1546 elif self.isvector():
1547 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1548 elif self.isscalar():
1549 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1550 elif self.isobject() and hasattr(self.__C,"cholesky"):
1551 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1553 def choleskyI(self):
1554 "Inversion de la décomposition de Cholesky"
1556 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1557 elif self.isvector():
1558 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1559 elif self.isscalar():
1560 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1561 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1562 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1564 def diag(self, msize=None):
1565 "Diagonale de la matrice"
1567 return numpy.diag(self.__C)
1568 elif self.isvector():
1570 elif self.isscalar():
1572 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1574 return self.__C * numpy.ones(int(msize))
1575 elif self.isobject():
1576 return self.__C.diag()
1578 def asfullmatrix(self, msize=None):
1582 elif self.isvector():
1583 return numpy.matrix( numpy.diag(self.__C), float )
1584 elif self.isscalar():
1586 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1588 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1589 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1590 return self.__C.asfullmatrix()
1592 def trace(self, msize=None):
1593 "Trace de la matrice"
1595 return numpy.trace(self.__C)
1596 elif self.isvector():
1597 return float(numpy.sum(self.__C))
1598 elif self.isscalar():
1600 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1602 return self.__C * int(msize)
1603 elif self.isobject():
1604 return self.__C.trace()
1610 "x.__repr__() <==> repr(x)"
1611 return repr(self.__C)
1614 "x.__str__() <==> str(x)"
1615 return str(self.__C)
1617 def __add__(self, other):
1618 "x.__add__(y) <==> x+y"
1619 if self.ismatrix() or self.isobject():
1620 return self.__C + numpy.asmatrix(other)
1621 elif self.isvector() or self.isscalar():
1622 _A = numpy.asarray(other)
1623 _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
1624 return numpy.asmatrix(_A)
1626 def __radd__(self, other):
1627 "x.__radd__(y) <==> y+x"
1628 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1630 def __sub__(self, other):
1631 "x.__sub__(y) <==> x-y"
1632 if self.ismatrix() or self.isobject():
1633 return self.__C - numpy.asmatrix(other)
1634 elif self.isvector() or self.isscalar():
1635 _A = numpy.asarray(other)
1636 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1637 return numpy.asmatrix(_A)
1639 def __rsub__(self, other):
1640 "x.__rsub__(y) <==> y-x"
1641 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1644 "x.__neg__() <==> -x"
1647 def __mul__(self, other):
1648 "x.__mul__(y) <==> x*y"
1649 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1650 return self.__C * other
1651 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1652 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1653 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1654 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1655 return self.__C * numpy.asmatrix(other)
1657 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1658 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1659 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1660 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1661 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1662 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1664 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1665 elif self.isscalar() and isinstance(other,numpy.matrix):
1666 return self.__C * other
1667 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1668 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1669 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1671 return self.__C * numpy.asmatrix(other)
1672 elif self.isobject():
1673 return self.__C.__mul__(other)
1675 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
def __rmul__(self, other):
    "x.__rmul__(y) <==> y*x"
    # Right product, mirror of __mul__ with the operand on the left:
    # dispatch on the internal storage self.__C (matrix / diagonal vector /
    # scalar / object). NOTE(review): the "else:" lines preceding the raise
    # statements were lost in the garbled listing and are restored here.
    if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
        return other * self.__C
    elif self.isvector() and isinstance(other,numpy.matrix):
        if numpy.ravel(other).size == self.shape[0]: # Vector
            return numpy.asmatrix(numpy.ravel(other) * self.__C)
        elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
            return numpy.asmatrix(numpy.array(other) * self.__C)
        else:
            raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
    elif self.isscalar() and isinstance(other,numpy.matrix):
        return other * self.__C
    elif self.isobject():
        return self.__C.__rmul__(other)
    else:
        raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
def __len__(self):
    "x.__len__() <==> len(x)"
    # The covariance is square: its order is the first shape dimension.
    # NOTE(review): the "def" line was lost in the garbled listing and is
    # reconstructed here.
    return self.shape[0]
1699 # ==============================================================================
class ObserverF(object):
    """
    Creation of an observer function from its source text.

    The text in `corps` is executed with the observed variable bound to
    `var` and contextual information bound to `info`.
    """
    def __init__(self, corps=""):
        # Source text of the observer, executed verbatim at call time
        self.__corps = corps
    def func(self,var,info):
        "Observation function: runs the stored text with var/info in scope"
        # SECURITY: exec() runs arbitrary code; `corps` must come from a
        # trusted source (here it is user-provided case configuration).
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer usable as an observer callback"
        return self.func
1713 # ==============================================================================
class CaseLogger(object):
    """
    Recording of the commands that create a case, with normalized
    dump/load through pluggable viewers and loaders.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []       # Ordered series of registered commands
        self.__switchoff = False    # When True, registration is suspended
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Recording of an individual command"
        # A command is logged only if fully described and logging is active
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # NOTE: mutates the caller's key list on purpose ("self" removed)
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                self.__switchoff = True
        # Any call with __switchoff False re-enables logging
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized load of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1763 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the corresponding ordered
    list of values of the function given as argument.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # NOTE(review): "unkown" typo kept as-is to preserve the exact runtime message
        raise ValueError("MultiFonction not iterable unkown input type: %s"%(type(__xserie),))
    #
    # Sequential evaluation, preserving the input ordering
    return [ _sFunction( __xvalue ) for __xvalue in __xserie ]
1778 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,   # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,   # Already available simulation of Hm(x)
                   _arg = None,   # Extra arguments for Hm, as a tuple
                   _BI  = None,   # Inverse background error covariance (B^-1)
                   _RI  = None,   # Inverse observation error covariance (R^-1)
                   _Xb  = None,   # Background state (Xb)
                   _Y   = None,   # Observation (Y)
                   _SIV = False,  # To be phased out for 8.0
                   _SSC = None,   # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,      # nbPreviousSteps
                   _QM  = "DA",   # QualityMeasure
                   _SSV = None,   # In/out storage: self.StoredVariables
                   _fRt = False,  # Return (or not) the extended output
                   _sSc = True,   # Store (or not) the supplementary calculations
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar quality measure J (or _HX for "QR", or the full
    storage dict _SSV when _fRt is True).
    """
    # FIX(review): the former defaults "_SSC=[]" and "_SSV={}" were shared
    # mutable defaults, leaking stored values across independent calls;
    # they are replaced by None sentinels with fresh containers per call.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    #
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        # Ensure every storage slot exists, and alias Persistence-style
        # objects so that "append" can be used uniformly below.
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # Use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Observation operator value: reuse a provided simulation, or apply _Hm
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    # A NaN simulation propagates as a NaN cost (the point is unusable)
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    # Optimum tracking over the stored J series (ignoring previous steps)
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For the QuantileRegression
            return _HX
        else:
            return J
1893 # ==============================================================================
# Self-test entry point: announce the autodiagnostic only when the module
# is executed directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')