1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 Ce module est destiné à être appelée par AssimilationStudy.
28 __author__ = "Jean-Philippe ARGAUD"
import os
import sys
import logging
import copy
import numpy

from daCore import Persistence
from daCore import PlatformInfo
from daCore import Interfaces
from daCore import Templates
from daCore.Interfaces import ImportFromScript
42 # ==============================================================================
class CacheManager(object):
    """
    General management of a cache of computations: stores triplets of
    (point, operator result at that point, norm of the point) and retrieves
    a previously computed result when a new point matches a stored one
    within a tolerance relative to the stored point's norm.
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,
                 lenghtOfRedundancy    = -1,
                 ):
        """
        The tolerance characteristics can be changed at creation.
        - toleranceInRedundancy : tolerance used to declare two points
          identical, relative to the norm of the stored point
        - lenghtOfRedundancy : maximal number of stored computations; a
          negative value means "auto-size on first storage" (2*size+2)
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        # Remember the configured capacity so enable() can restore it after
        # disable() has set the working capacity to 0
        self.__initlnOR = self.__lenghtOR
        self.clearCache()

    def clearCache(self):
        "Empty the cache"
        self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms

    def wasCalculatedIn(self, xValue ): #, info="" ):
        "Check whether a computation matching the given value already exists"
        __alreadyCalculated = False
        __HxV = None
        # Search backward (most recent entries first) through at most
        # __lenghtOR stored entries; a capacity of 0 (disabled cache) or an
        # empty list makes the range empty, so nothing is ever found
        for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
            if xValue.size != self.__listOPCV[i][0].size:
                # Different sizes can not match, skip the comparison
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alreadyCalculated = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a computation matching the given value"
        if self.__lenghtOR < 0:
            # Auto-size the cache capacity on the first storage
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entries to keep at most __lenghtOR items (FIFO)
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Deactivate the cache: lookups find nothing until enable() is called"
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Reactivate the cache with its previously configured capacity"
        self.__lenghtOR = self.__initlnOR
102 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, defined either by a Python
    function/method or by a matrix. Evaluations can be cached through a
    class-level CacheManager shared by all Operator instances.
    """
    # Global (class-wide) evaluation counters, shared by every instance
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Shared cache of already computed evaluations
    CM = CacheManager()
    #
    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build an object of this type by giving, through one of the two
        keywords, either a Python function or a matrix.
        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether redundant computations are avoided
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod # logtimer(fromMethod)
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # BUGFIX: "numpy.float" was only an alias of the builtin "float"
            # and has been removed from modern NumPy; use "float" directly.
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the shared evaluation cache"
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Reactivate the shared cache, only if allowed for this operator"
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        "Return the operator kind (\"Method\", \"Matrix\" or None)"
        return self.__Type

    def appliedTo(self, xValue, HValue = None):
        """
        Return the result of applying the operator to the argument xValue.
        This method simply applies the operator, its argument being assumed
        of the right type beforehand. If HValue is given, it is taken as the
        already computed result and is only stored in the cache.
        - xValue : argument suitable for applying the operator
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            if self.__AvoidRC:
                Operator.CM.storeValueInX(xValue,HxValue)
        else:
            if self.__AvoidRC:
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            else:
                __alreadyCalculated = False
            #
            if __alreadyCalculated:
                # Cache hit: reuse the stored evaluation
                self.__addOneCacheCall()
                HxValue = __HxV
            else:
                if self.__Matrix is not None:
                    self.__addOneMatrixCall()
                    HxValue = self.__Matrix * xValue
                else:
                    self.__addOneMethodCall()
                    HxValue = self.__Method( xValue )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(xValue,HxValue)
        #
        return HxValue

    def appliedControledFormTo(self, paire ):
        """
        Return the result of applying the operator to a pair
        (xValue, uValue). This method simply applies the operator, its
        argument being assumed of the right type beforehand. If uValue is
        None, the operator is assumed to apply to xValue only.
        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        else:
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        """
        Return the result of applying the operator to the argument xValue,
        the operator being valid at xNominal. This method simply applies the
        operator, its argument being assumed of the right type beforehand.
        If the operator is linear (it is a matrix), it is valid at any
        nominal point, and the nominal point is not needed.
        Arguments: a list containing
        - xNominal : the point where the operator is built to be then applied
        - xValue : argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        else:
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        """
        Return the operator in matrix form, evaluating the functional form
        at the given operating point when needed.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix
        # BUGFIX: the original compared with "is not" on a string literal,
        # which tests identity (implementation-dependent interning) instead
        # of equality; "!=" is the correct comparison for the sentinel.
        elif ValueForMethodForm != "UnknownVoidValue": # Do not use "None" as the sentinel
            self.__addOneMethodCall()
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    def shape(self):
        """
        Return the numpy shape if the operator is available in matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations as the tuple
        (local total, local matrix, local method, local cached,
         global total, global matrix, global method, global cached),
        or the single entry selected by "which".
        """
        __nbcalls = (
            self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None: return __nbcalls
        else:             return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one evaluation through the matrix form"
        self.__NbCallsAsMatrix += 1 # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self):
        "Count one evaluation through the function/method form"
        self.__NbCallsAsMethod += 1 # Local count
        Operator.NbCallsAsMethod += 1 # Global count

    def __addOneCacheCall(self):
        "Count one evaluation served from the cache"
        self.__NbCallsOfCached += 1 # Local count
        Operator.NbCallsOfCached += 1 # Global count
283 # ==============================================================================
class FullOperator(object):
    """
    General interface class for a full operator
    (Direct, Tangent Linear, Adjoint).
    """
    # NOTE(review): several source lines of this class are absent from this
    # chunk (among others the "def __init__(" opening, some "else:" branches
    # and closing parentheses). Only the visible statements are documented
    # below; the missing structure has to be confirmed against the full file.
                 name             = "GenericFullOperator",
                 asOneFunction    = None, # One Python function
                 asThreeFunctions = None, # Functions dictionary
                 asScript         = None, # Script defining the function(s)
                 asDict           = None, # Parameters
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        #
        # Map user-level parameter names to the internal "with..." keys
        # consumed by the finite-difference approximation further below
        if (asDict is not None) and isinstance(asDict, dict):
            __Parameters.update( asDict )
            if "DifferentialIncrement" in asDict:
                __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
            if "CenteredFiniteDifference" in asDict:
                __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
            if "EnableMultiProcessing" in asDict:
                __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
            if "NumberOfProcesses" in asDict:
                __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
        #
        # Operator(s) given through an external script: import the matrix or
        # the Direct/Tangent/Adjoint functions from the script file
        if asScript is not None:
            __Matrix, __Function = None, None
                __Matrix = ImportFromScript(asScript).getvalue( self.__name )
                __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions:
                    "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
                    "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
                    "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
                __Function.update(__Parameters)
            # Operator(s) given directly as Python objects
            if asOneFunction is not None:
                if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
                    if asOneFunction["Direct"] is not None:
                        __Function = asOneFunction
                        raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
                    __Function = { "Direct":asOneFunction }
                # A single direct function implies approximated derivatives
                __Function.update({"useApproximatedDerivatives":True})
                __Function.update(__Parameters)
            elif asThreeFunctions is not None:
                # A genuine Tangent/Adjoint pair is usable as-is
                if isinstance(asThreeFunctions, dict) and \
                    ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
                    ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
                    (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
                    __Function = asThreeFunctions
                elif isinstance(asThreeFunctions, dict) and \
                    ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
                    __Function = asThreeFunctions
                    __Function.update({"useApproximatedDerivatives":True})
                    raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
                if "Direct" not in asThreeFunctions:
                    # Fall back on the tangent operator as the direct one
                    __Function["Direct"] = asThreeFunctions["Tangent"]
                __Function.update(__Parameters)
        #
        # if sys.version_info[0] < 3 and isinstance(__Function, dict):
        #     for k in ("Direct", "Tangent", "Adjoint"):
        #         if k in __Function and hasattr(__Function[k],"__class__"):
        #             if type(__Function[k]) is type(self.__init__):
        #                 raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
        #
        # Normalize the "operator applied in X" information into a dictionary
        if appliedInX is not None and isinstance(appliedInX, dict):
            __appliedInX = appliedInX
        elif appliedInX is not None:
            __appliedInX = {"HXb":appliedInX}
        #
        if scheduledBy is not None:
            self.__T = scheduledBy
        #
        # Build the three elementary operators; when only "Direct" is usable,
        # approximate Tangent/Adjoint by finite differences (FDApproximation)
        if isinstance(__Function, dict) and \
            ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
            ("Direct" in __Function) and (__Function["Direct"] is not None):
            if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
            if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
            if "withdX" not in __Function: __Function["withdX"] = None
            if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
            if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
            if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
            if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
            if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
            from daNumerics.ApproximatedDerivatives import FDApproximation
            FDA = FDApproximation(
                Function = __Function["Direct"],
                centeredDF = __Function["withCenteredDF"],
                increment = __Function["withIncrement"],
                dX = __Function["withdX"],
                avoidingRedundancy = __Function["withAvoidingRedundancy"],
                toleranceInRedundancy = __Function["withToleranceInRedundancy"],
                lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
                mpEnabled = __Function["withmpEnabled"],
                mpWorkers = __Function["withmpWorkers"],
            self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
        elif isinstance(__Function, dict) and \
            ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
            (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
            self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
        elif asMatrix is not None:
            # NOTE(review): "numpy.float" is a removed alias of the builtin
            # "float" in modern NumPy releases — to be updated when possible
            __matrice = numpy.matrix( __Matrix, numpy.float )
            self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
            self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
            self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
            raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
        #
        # Normalize every "applied in X" value into a column numpy.matrix
        if __appliedInX is not None:
            self.__FO["AppliedInX"] = {}
            if not isinstance(__appliedInX, dict):
                raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
            for key in list(__appliedInX.keys()):
                if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
                    # Case of a true matrix
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
                elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
                    # Case of a vector represented as a 2-dimensional array
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
                    self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
            self.__FO["AppliedInX"] = None

        "x.__repr__() <==> repr(x)"
        # NOTE(review): "self.__V" does not appear to be defined anywhere in
        # this class (likely a copy/paste from another class); calling repr()
        # would raise AttributeError — presumably "self.__FO" was intended.
        # To be confirmed against the full file.
        return repr(self.__V)
        "x.__str__() <==> str(x)"
448 # ==============================================================================
class Algorithm(object):
    """
    General interface class for an assimilation algorithm.

    It provides a framework for writing an elementary assimilation algorithm
    class, by supplying a container (dictionary) of initialized persistent
    variables, and access methods to these stored variables.

    An elementary algorithm class has to implement the "run" method.
    """
    # NOTE(review): some source lines of this class are absent from this
    # chunk (several "def" and "else:" lines in particular); only the visible
    # statements are documented below.
    def __init__(self, name):
        """
        The present initialisation builds storage variables generically
        available in the elementary algorithms. These storage variables are
        then kept in a dictionary internal to the object, accessed through
        the "get" method.

        The provided variables are:
        - CostFunctionJ : global cost function, sum of the two following parts
        - CostFunctionJb : background part of the cost function
        - CostFunctionJo : observation part of the cost function
        - GradientOfCostFunctionJ : gradient of the global cost function
        - GradientOfCostFunctionJb : gradient of the background part
        - GradientOfCostFunctionJo : gradient of the observation part
        - CurrentState : current state during iterations
        - Analysis : the analysis Xa
        - SimulatedObservationAtBackground : observed state H(Xb) at background
        - SimulatedObservationAtCurrentState : observed state H(X) at current state
        - SimulatedObservationAtOptimum : observed state H(Xa) at the optimum
        - Innovation : the innovation : d = Y - H(X)
        - InnovationAtCurrentState : innovation at current state : dn = Y - H(Xn)
        - SigmaObs2 : optimal correction indicator of observation errors
        - SigmaBck2 : optimal correction indicator of background errors
        - MahalanobisConsistency : covariance consistency indicator
        - OMA : Observation minus Analysis : Y - Xa
        - OMB : Observation minus Background : Y - Xb
        - BMA : Background minus Analysis : Xb - Xa
        - APosterioriCovariance : matrix A
        - APosterioriVariances : variances of the matrix A
        - APosterioriStandardDeviations : standard deviations of the matrix A
        - APosterioriCorrelations : correlations of the matrix A
        - Residu : for the checking algorithms
        Additional variables to be stored can be added in the initialisation
        of the elementary algorithm that inherits from this class.
        """
        logging.debug("%s Initialisation", str(name))
        self._m = PlatformInfo.SystemUsage()   # memory usage probe
        self._name = str( name )
        self._parameters = {"StoreSupplementaryCalculations":[]}
        self.__required_parameters = {}
        self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
        #
        # Persistent storage containers, one per named result
        self.StoredVariables = {}
        self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
        self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
        self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
        self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
        self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
        self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
        self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
        self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
        self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
        self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
        self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
        self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
        self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
        self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
        self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
        self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
        self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
        self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
        self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
        self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
        self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
        self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
        self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
        self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
        self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
        self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
        self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
        self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
        self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
        self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
        self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")

    def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
        "Pre-computation: checks the inputs and sets up the runtime parameters"
        logging.debug("%s Lancement", self._name)
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        #
        # Update of self._parameters with the content of Parameters
        self.__setParameters(Parameters)
        #
        # Corrections and complements
        def __test_vvalue( argument, variable, argname):
            # Check a vector input against the declared input requirements
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
                    logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
        __test_vvalue( Xb, "Xb", "Background or initial state" )
        __test_vvalue( Y, "Y", "Observation" )
        def __test_cvalue( argument, variable, argname):
            # Check a covariance matrix input against the declared requirements
                if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
                    raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
                elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
                    logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
                    logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
                logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
        __test_cvalue( R, "R", "Observation" )
        __test_cvalue( B, "B", "Background" )
        __test_cvalue( Q, "Q", "Evolution" )
        #
        # Bounds are kept only as a non-empty list/tuple, otherwise None
        if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
            logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
            self._parameters["Bounds"] = None
        #
        # Verbosity of the underlying optimizers, driven by the logging level
        if logging.getLogger().level < logging.WARNING:
            self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
                self._parameters["optmessages"] = 15
            self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
            if PlatformInfo.has_scipy:
                import scipy.optimize
                self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
                self._parameters["optmessages"] = 15

    def _post_run(self,_oH=None):
        "Post-computation: a posteriori derived quantities and final logging"
        if ("StoreSupplementaryCalculations" in self._parameters) and \
            "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
            for _A in self.StoredVariables["APosterioriCovariance"]:
                if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
                if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
                    self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
                if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
                    _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))   # inverse standard deviations
                    _C = numpy.dot(_EI, numpy.dot(_A, _EI))           # correlation matrix
                    self.StoredVariables["APosterioriCorrelations"].store( _C )
            logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
            logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
        logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
        logging.debug("%s Terminé", self._name)

    def get(self, key=None):
        """
        Return one of the stored variables identified by the key, or the
        dictionary of all available variables when no key is given. The
        variables are returned directly as objects, so the individual access
        methods are those of the persistence classes.
        """
            return self.StoredVariables[key]
            return self.StoredVariables

    def __contains__(self, key=None):
        "D.__contains__(k) -> True if D has a key k, else False"
        return key in self.StoredVariables

        "D.keys() -> list of D's keys"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.keys()

        "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
        if hasattr(self, "StoredVariables"):
            return self.StoredVariables.pop(k, d)
                raise TypeError("pop expected at least 1 arguments, got 0")
            "If key is not found, d is returned if given, otherwise KeyError is raised"

    def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
        """
        Has to implement the elementary assimilation computation, in its most
        natural mathematical form possible.
        """
        raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")

    def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
        """
        Declare a required parameter of the algorithm together with its
        default characteristics (default value, typecast, bounds, allowed
        values and description message).
        """
            raise ValueError("A name is mandatory to define a required parameter.")
        #
        self.__required_parameters[name] = {
            "typecast" : typecast,
        logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))

    def getRequiredParameters(self, noDetails=True):
        """
        Return the sorted list of required parameter names, or directly the
        dictionary of the required parameters.
        """
            return sorted(self.__required_parameters.keys())
            return self.__required_parameters

    def setParameterValue(self, name=None, value=None):
        """
        Return the value of a required parameter in a controlled way, after
        applying the declared typecast and checking bounds/allowed values.
        """
        default = self.__required_parameters[name]["default"]
        typecast = self.__required_parameters[name]["typecast"]
        minval = self.__required_parameters[name]["minval"]
        maxval = self.__required_parameters[name]["maxval"]
        listval = self.__required_parameters[name]["listval"]
        #
        if value is None and default is None:
        elif value is None and default is not None:
            if typecast is None: __val = default
            else: __val = typecast( default )
            if typecast is None: __val = value
            else: __val = typecast( value )
        #
        if minval is not None and (numpy.array(__val, float) < minval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
        if maxval is not None and (numpy.array(__val, float) > maxval).any():
            raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
        if listval is not None:
            if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
                        raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
            elif __val not in listval:
                raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))

    def requireInputArguments(self, mandatory=(), optional=()):
        """
        Declare which input arguments are mandatory or optional for the run.
        """
        self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
        self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )

    def __setParameters(self, fromDico={}):
        """
        Store the received parameters in the internal dictionary, applying
        the controlled setParameterValue for each declared required parameter.
        """
        self._parameters.update( fromDico )
        for k in self.__required_parameters.keys():
            if k in fromDico.keys():
                self._parameters[k] = self.setParameterValue(k,fromDico[k])
                self._parameters[k] = self.setParameterValue(k)
            logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
737 # ==============================================================================
738 class AlgorithmAndParameters(object):
740 Classe générale d'interface d'action pour l'algorithme et ses paramètres
743 name = "GenericAlgorithm",
750 self.__name = str(name)
754 self.__algorithm = {}
755 self.__algorithmFile = None
756 self.__algorithmName = None
758 self.updateParameters( asDict, asScript )
760 if asAlgorithm is None and asScript is not None:
761 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
765 if __Algo is not None:
766 self.__A = str(__Algo)
767 self.__P.update( {"Algorithm":self.__A} )
769 self.__setAlgorithm( self.__A )
771 def updateParameters(self,
775 "Mise a jour des parametres"
776 if asDict is None and asScript is not None:
777 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
781 if __Dict is not None:
782 self.__P.update( dict(__Dict) )
# Run the assimilation computation in pure Python: clear the operator cache,
# unpack the user-supplied study objects (each may hide its value behind a
# getO() accessor), validate the shapes, then call the selected algorithm.
784 def executePythonScheme(self, asDictAO = None):
785 "Allows to launch the assimilation calculation"
786 Operator.CM.clearCache()
788 if not isinstance(asDictAO, dict):
789 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Background and CheckingPoint are alternatives for the same slot (Xb).
790 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
791 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
792 else: self.__Xb = None
793 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
794 else: self.__Y = asDictAO["Observation"]
795 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
796 else: self.__U = asDictAO["ControlInput"]
797 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
798 else: self.__HO = asDictAO["ObservationOperator"]
799 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
800 else: self.__EM = asDictAO["EvolutionModel"]
801 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
802 else: self.__CM = asDictAO["ControlModel"]
# Error covariances are taken as-is (no getO unwrapping here).
803 self.__B = asDictAO["BackgroundError"]
804 self.__R = asDictAO["ObservationError"]
805 self.__Q = asDictAO["EvolutionError"]
807 self.__shape_validate()
# NOTE(review): listing elided — the remaining keyword arguments of run()
# (original lines 810-818, presumably Xb, Y, U, HO, EM, CM, R, B, Q) are
# not visible here.
809 self.__algorithm.run(
819 Parameters = self.__P,
# Run the assimilation computation through a YACS XML scheme inside a SALOME
# session: load the schema, check parsing/validity/coherence, then execute it.
# NOTE(review): listing elided — the imports of SALOMERuntime/pilot/loader
# (original lines ~833-836), the "try:" opener before line 849, the early
# "return" statements after the diagnostic prints, and the executor RunW call
# (~line 869) are not visible here.
823 def executeYACSScheme(self, FileName=None):
824 "Allows to launch the assimilation calculation"
825 if FileName is None or not os.path.exists(FileName):
826 raise ValueError("a YACS file name has to be given for YACS execution.\n")
# YACS execution only makes sense inside a full SALOME+ADAO environment.
827 if not PlatformInfo.has_salome or \
828 not PlatformInfo.has_yacs or \
829 not PlatformInfo.has_adao:
830 raise ImportError("\n\n"+\
831 "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
832 "Please load the right environnement before trying to use it.\n")
837 SALOMERuntime.RuntimeSALOME_setRuntime()
839 r = pilot.getRuntime()
840 xmlLoader = loader.YACSLoader()
841 xmlLoader.registerProcCataLoader()
# Register the schema's own catalog so its components resolve.
843 catalogAd = r.loadCatalog("proc", os.path.abspath(FileName))
844 r.addCatalog(catalogAd)
849 p = xmlLoader.load(os.path.abspath(FileName))
850 except IOError as ex:
851 print("The YACS XML schema file can not be loaded: %s"%(ex,))
# Three successive sanity gates: parser log, validity report, link coherence.
853 logger = p.getLogger("parser")
854 if not logger.isEmpty():
855 print("The imported YACS XML schema has errors on parsing:")
856 print(logger.getStr())
859 print("The YACS XML schema is not valid and will not be executed:")
860 print(p.getErrorReport())
862 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
863 p.checkConsistency(info)
864 if info.areWarningsOrErrors():
865 print("The YACS XML schema is not coherent and will not be executed:")
866 print(info.getGlobalRepr())
868 e = pilot.ExecutorSwig()
870 if p.getEffectiveState() != pilot.DONE:
871 print(p.getErrorReport())
# Lookup of a named quantity: stored algorithm variables take precedence over
# plain parameters.
# NOTE(review): listing elided — the return of self.__P[key] for the elif
# branch, the trailing else/raise, and the "def pop(...)" line (original
# lines 880-884) are not visible; line 885 onward belongs to pop().
875 def get(self, key = None):
876 "Checks the existence of a variable or parameters key"
877 if key in self.__algorithm:
878 return self.__algorithm.get( key )
879 elif key in self.__P:
885 "Required for pickling"
886 return self.__algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of parameters required by the chosen algorithm"
    required_parameters = self.__algorithm.getRequiredParameters(noDetails)
    return required_parameters
# Attach the observer callable __O (with user info __I and scheduler __S) to
# the algorithm's stored variable named __V.
# NOTE(review): listing elided — the remaining keyword arguments of
# setDataObserver (original lines 899, 901-902, 904-905; presumably
# HookFunction=__O and Scheduler=__S) are not visible here.
892 def setObserver(self, __V, __O, __I, __S):
893 if self.__algorithm is None \
894 or isinstance(self.__algorithm, dict) \
895 or not hasattr(self.__algorithm,"StoredVariables"):
896 raise ValueError("No observer can be build before choosing an algorithm.")
897 if __V not in self.__algorithm:
898 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
900 self.__algorithm.StoredVariables[ __V ].setDataObserver(
903 HookParameters = __I,
# Detach the observer callable __O from the stored variable named __V;
# __A presumably asks for removal of all observers — TODO confirm against
# removeDataObserver's signature (its remaining arguments, original lines
# 913-918, are elided from this listing).
906 def removeObserver(self, __V, __O, __A = False):
907 if self.__algorithm is None \
908 or isinstance(self.__algorithm, dict) \
909 or not hasattr(self.__algorithm,"StoredVariables"):
910 raise ValueError("No observer can be removed before choosing an algorithm.")
911 if __V not in self.__algorithm:
912 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
914 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
# Tell whether the stored variable __V currently has a data observer.
# NOTE(review): listing elided — the "return False" lines of the two guard
# branches (original lines 923 and 925) are not visible; line 929 is the
# body of a keys() method whose "def" line (928) is also elided.
919 def hasObserver(self, __V):
920 if self.__algorithm is None \
921 or isinstance(self.__algorithm, dict) \
922 or not hasattr(self.__algorithm,"StoredVariables"):
924 if __V not in self.__algorithm:
926 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
929 return list(self.__algorithm.keys()) + list(self.__P.keys())
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key is known if it names either a stored algorithm variable
    # or a plain parameter.
    if key in self.__algorithm:
        return True
    return key in self.__P
# NOTE(review): these four lines are the bodies of __repr__ and __str__
# whose "def" lines (original 935 and 939) are elided from this listing.
# Both render the algorithm name (self.__A) followed by the parameters.
936 "x.__repr__() <==> repr(x)"
937 return repr(self.__A)+", "+repr(self.__P)
940 "x.__str__() <==> str(x)"
941 return str(self.__A)+", "+str(self.__P)
943 def __setAlgorithm(self, choice = None ):
# (docstring — opening/closing quotes at original lines 944/948 elided)
945 Allows to select the algorithm to be used to carry out the assimilation
946 study. The argument is a character string referring to the name of an
947 algorithm performing the operation on the fixed arguments.
# NOTE(review): listing elided — the "if choice is None:" guard (~949),
# "module_path = None" initialisation (~957), the "try:" opener before
# line 967 and the trailing parameter registration (~979-980) are not
# visible here.
950 raise ValueError("Error: algorithm choice has to be given")
# An algorithm can only be chosen once per study.
951 if self.__algorithmName is not None:
952 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
953 daDirectory = "daAlgorithms"
955 # Explicitly search for the complete file
956 # ------------------------------------------
958 for directory in sys.path:
959 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
960 module_path = os.path.abspath(os.path.join(directory, daDirectory))
961 if module_path is None:
962 raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
964 # Import the complete file as a module
965 # ------------------------------------------
# sys.path is temporarily prepended with the algorithm directory, then
# restored, so the import does not leak into later lookups.
967 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
968 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
969 if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
970 raise ImportError("this module does not define a valid elementary algorithm.")
971 self.__algorithmName = str(choice)
972 sys.path = sys_path_tmp ; del sys_path_tmp
973 except ImportError as e:
974 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
976 # Instantiate an object of the elementary type of the file
977 # -------------------------------------------------
978 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
981 def __shape_validate(self):
# (docstring — opening/closing quotes at original lines 982/985 elided)
983 Validation of the correct correspondence of the sizes of the variables
984 and of the matrices, if there are any.
# Each quantity may expose its shape as a "size", a "shape" tuple or a
# callable "shape()"; missing quantities get a neutral (0,) / (0,0) shape.
986 if self.__Xb is None: __Xb_shape = (0,)
987 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
988 elif hasattr(self.__Xb,"shape"):
989 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
990 else: __Xb_shape = self.__Xb.shape()
991 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
993 if self.__Y is None: __Y_shape = (0,)
994 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
995 elif hasattr(self.__Y,"shape"):
996 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
997 else: __Y_shape = self.__Y.shape()
998 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1000 if self.__U is None: __U_shape = (0,)
1001 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1002 elif hasattr(self.__U,"shape"):
1003 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1004 else: __U_shape = self.__U.shape()
1005 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1007 if self.__B is None: __B_shape = (0,0)
1008 elif hasattr(self.__B,"shape"):
1009 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1010 else: __B_shape = self.__B.shape()
1011 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1013 if self.__R is None: __R_shape = (0,0)
1014 elif hasattr(self.__R,"shape"):
1015 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1016 else: __R_shape = self.__R.shape()
1017 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1019 if self.__Q is None: __Q_shape = (0,0)
1020 elif hasattr(self.__Q,"shape"):
1021 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1022 else: __Q_shape = self.__Q.shape()
1023 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators (HO, EM, CM) are dicts of operator forms; their shape is read
# from the "Direct" form when available.
1025 if len(self.__HO) == 0: __HO_shape = (0,0)
1026 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1027 elif hasattr(self.__HO["Direct"],"shape"):
1028 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1029 else: __HO_shape = self.__HO["Direct"].shape()
1030 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1032 if len(self.__EM) == 0: __EM_shape = (0,0)
1033 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1034 elif hasattr(self.__EM["Direct"],"shape"):
1035 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1036 else: __EM_shape = self.__EM["Direct"].shape()
1037 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1039 if len(self.__CM) == 0: __CM_shape = (0,0)
1040 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1041 elif hasattr(self.__CM["Direct"],"shape"):
1042 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1043 else: __CM_shape = self.__CM["Direct"].shape()
1044 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1046 # Checking the conditions
1047 # ---------------------------
# NOTE(review): listing elided in this section (e.g. original lines 1052,
# 1061, 1070, 1078, 1080, 1083, 1086, 1089 are missing) — some cross-checks
# are therefore not visible here.
1048 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1049 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1050 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1051 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariances must be square.
1053 if not( min(__B_shape) == max(__B_shape) ):
1054 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1055 if not( min(__R_shape) == max(__R_shape) ):
1056 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1057 if not( min(__Q_shape) == max(__Q_shape) ):
1058 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1059 if not( min(__EM_shape) == max(__EM_shape) ):
1060 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross-compatibility of the observation operator with state, observation
# and the covariances.
1062 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
1063 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1064 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
1065 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1066 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1067 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1068 if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1069 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# EnsembleBlue accepts a background given as an ensemble: reshape it into
# a persistent vector series instead of failing.
1071 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1072 if self.__algorithmName in ["EnsembleBlue",]:
1073 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1074 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1075 for member in asPersistentVector:
1076 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1077 __Xb_shape = min(__B_shape)
1079 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1081 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1082 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1084 if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
1085 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1087 if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
1088 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# The optional "Bounds" parameter must provide one pair per state component.
1090 if ("Bounds" in self.__P) \
1091 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1092 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1093 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1094 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1098 # ==============================================================================
1099 class DataObserver(object):
# (docstring — opening/closing quotes elided from this listing)
1101 General interface class of observer type
# Builds an observer function from a string, a template name, a script or a
# ready-made callable, then registers it on one or several stored variables
# of the given algorithm object.
# NOTE(review): listing elided — the full __init__ signature (original lines
# 1105-1115), several else-branches and the per-variable unpacking of
# (ename, einfo) before line 1156 are not visible here.
1104 name = "GenericObserver",
1116 self.__name = str(name)
1121 if onVariable is None:
1122 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1123 elif type(onVariable) in (tuple, list):
1124 self.__V = tuple(map( str, onVariable ))
1125 if withInfo is None:
# The same info string is duplicated for every observed variable.
1128 self.__I = (str(withInfo),)*len(self.__V)
1129 elif isinstance(onVariable, str):
1130 self.__V = (onVariable,)
1131 if withInfo is None:
1132 self.__I = (onVariable,)
1134 self.__I = (str(withInfo),)
1136 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Source of the observer body, by decreasing priority: explicit string,
# named template, user script.
1138 if asString is not None:
1139 __FunctionText = asString
1140 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1141 __FunctionText = Templates.ObserverTemplates[asTemplate]
1142 elif asScript is not None:
1143 __FunctionText = ImportFromScript(asScript).getstring()
1146 __Function = ObserverF(__FunctionText)
# An already-built callable wins over the textual definition.
1148 if asObsObject is not None:
1149 self.__O = asObsObject
1151 self.__O = __Function.getfunc()
1153 for k in range(len(self.__V)):
1156 if ename not in withAlgo:
1157 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1159 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
1162 "x.__repr__() <==> repr(x)"
1163 return repr(self.__V)+"\n"+repr(self.__O)
1166 "x.__str__() <==> str(x)"
1167 return str(self.__V)+"\n"+str(self.__O)
1169 # ==============================================================================
1170 class State(object):
# (docstring — opening/closing quotes elided from this listing)
1172 General interface class of state type
# NOTE(review): listing elided — the __init__ "def" line (~1174), part of
# the signature (asVector, asScript, scheduledBy presumably) and several
# else-branches are not visible here.
1175 name = "GenericVector",
1177 asPersistentVector = None,
1180 toBeChecked = False,
1183 Allows to define a vector:
1184 - asVector : data input, as a vector compatible with the
1185 numpy.matrix constructor, or "True" if input by script.
1186 - asPersistentVector : data input, as a series of vectors
1187 compatible with the numpy.matrix constructor, or as an object of
1188 Persistence type, or "True" if input by script.
1189 - asScript : if a valid script is given containing a variable
1190 named "name", the variable is of type "asVector" (by default) or
1191 "asPersistentVector" depending on which of these variables is set
1194 self.__name = str(name)
1195 self.__check = bool(toBeChecked)
1199 self.__is_vector = False
1200 self.__is_series = False
# Script input: the same script variable is read either as a single vector
# or as a vector series, depending on the asPersistentVector flag.
1202 if asScript is not None:
1203 __Vector, __Series = None, None
1204 if asPersistentVector:
1205 __Series = ImportFromScript(asScript).getvalue( self.__name )
1207 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1209 __Vector, __Series = asVector, asPersistentVector
1211 if __Vector is not None:
1212 self.__is_vector = True
# Stored as a float column matrix.
1213 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1214 self.shape = self.__V.shape
1215 self.size = self.__V.size
1216 elif __Series is not None:
1217 self.__is_series = True
1218 if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
1219 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
# A string series is evaluated into a Python sequence.
# NOTE(review): eval() on user-provided text — unsafe on untrusted input.
1220 if isinstance(__Series, str): __Series = eval(__Series)
1221 for member in __Series:
1222 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
# NOTE(review): leftover debug flush — candidate for removal upstream.
1223 import sys ; sys.stdout.flush()
1226 if isinstance(self.__V.shape, (tuple, list)):
1227 self.shape = self.__V.shape
1229 self.shape = self.__V.shape()
# Normalize a 1-D shape to an explicit column shape (n,1).
1230 if len(self.shape) == 1:
1231 self.shape = (self.shape[0],1)
1232 self.size = self.shape[0] * self.shape[1]
1234 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1236 if scheduledBy is not None:
1237 self.__T = scheduledBy
# Accessors: getO optionally returns the scheduler along with the value;
# the "def" lines of isvector/isseries/__repr__/__str__ are elided below.
1239 def getO(self, withScheduler=False):
1241 return self.__V, self.__T
1242 elif self.__T is None:
1248 "Checking of the internal type"
1249 return self.__is_vector
1252 "Checking of the internal type"
1253 return self.__is_series
1256 "x.__repr__() <==> repr(x)"
1257 return repr(self.__V)
1260 "x.__str__() <==> str(x)"
1261 return str(self.__V)
1263 # ==============================================================================
1264 class Covariance(object):
# (docstring — opening/closing quotes elided from this listing)
1266 General interface class of covariance type
1269 name = "GenericCovariance",
1270 asCovariance = None,
1271 asEyeByScalar = None,
1272 asEyeByVector = None,
1275 toBeChecked = False,
1278 Allows to define a covariance:
1279 - asCovariance : data input, as a matrix compatible with
1280 the numpy.matrix constructor
1281 - asEyeByScalar : data input as a unique variance scalar,
1282 multiplying an identity correlation matrix, so no matrix
1283 has to be explicitly given
1284 - asEyeByVector : data input as a unique variance vector,
1285 to be put on the diagonal of a correlation matrix, so no matrix
1286 has to be explicitly given
1287 - asCovObject : data input as a python object, which has the
1288 mandatory methods "getT", "getI", "diag", "trace", "__add__",
1289 "__sub__", "__neg__", "__mul__", "__rmul__" and optional ones "shape",
1290 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1291 - toBeChecked : boolean indicating whether the SPD character of the
1292 full matrix has to be checked
# NOTE(review): listing elided — the __init__ "def" line, the asCovObject
# and asScript parameters, and the elif conditions selecting which script
# variable to read (original lines 1305, 1307, 1309, 1311, 1313) are not
# visible here.
1294 self.__name = str(name)
1295 self.__check = bool(toBeChecked)
# Exactly one of the four internal representations is selected below.
1298 self.__is_scalar = False
1299 self.__is_vector = False
1300 self.__is_matrix = False
1301 self.__is_object = False
1303 if asScript is not None:
1304 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
1306 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1308 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1310 __Object = ImportFromScript(asScript).getvalue( self.__name )
1312 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1314 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
1316 if __Scalar is not None:
1317 if numpy.matrix(__Scalar).size != 1:
1318 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1319 self.__is_scalar = True
# abs() silently makes the variance non-negative; validation below still
# rejects zero.
1320 self.__C = numpy.abs( float(__Scalar) )
1323 elif __Vector is not None:
1324 self.__is_vector = True
1325 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
# shape/size always describe the equivalent full square matrix.
1326 self.shape = (self.__C.size,self.__C.size)
1327 self.size = self.__C.size**2
1328 elif __Matrix is not None:
1329 self.__is_matrix = True
1330 self.__C = numpy.matrix( __Matrix, float )
1331 self.shape = self.__C.shape
1332 self.size = self.__C.size
1333 elif __Object is not None:
1334 self.__is_object = True
1336 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1337 if not hasattr(self.__C,at):
1338 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1339 if hasattr(self.__C,"shape"):
1340 self.shape = self.__C.shape
1343 if hasattr(self.__C,"size"):
1344 self.size = self.__C.size
1349 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1353 def __validate(self):
1355 if self.__C is None:
1356 raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
1357 if self.ismatrix() and min(self.shape) != max(self.shape):
1358 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1359 if self.isobject() and min(self.shape) != max(self.shape):
1360 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1361 if self.isscalar() and self.__C <= 0:
1362 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1363 if self.isvector() and (self.__C <= 0).any():
1364 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
# SPD check via a Cholesky attempt, only when requested or when logging is
# verbose (listing elided: the try/except wrappers at original lines 1366,
# 1368, 1371 and 1373 are not visible here).
1365 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
1367 L = numpy.linalg.cholesky( self.__C )
1369 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1370 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1372 L = self.__C.cholesky()
1374 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# Type predicates (their "def" lines, original 1376/1380/1384/1388, are
# elided from this listing).
1377 "Checking of the internal type"
1378 return self.__is_scalar
1381 "Checking of the internal type"
1382 return self.__is_vector
1385 "Checking of the internal type"
1386 return self.__is_matrix
1389 "Checking of the internal type"
1390 return self.__is_object
# getI: inverse covariance, dispatched on the internal representation
# ("def" line and matrix branch opener elided).
1395 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1396 elif self.isvector():
1397 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1398 elif self.isscalar():
1399 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1400 elif self.isobject():
1401 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1403 return None # Indispensable
# getT: transpose (a no-op in value for vector/scalar representations).
1408 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1409 elif self.isvector():
1410 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1411 elif self.isscalar():
1412 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1413 elif self.isobject():
1414 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
1417 "Cholesky decomposition"
1419 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1420 elif self.isvector():
1421 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1422 elif self.isscalar():
1423 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1424 elif self.isobject() and hasattr(self.__C,"cholesky"):
1425 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1427 def choleskyI(self):
1428 "Inversion of the Cholesky decomposition"
1430 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1431 elif self.isvector():
1432 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1433 elif self.isscalar():
1434 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1435 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1436 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1438 def diag(self, msize=None):
1439 "Diagonal of the matrix"
# (listing elided: the matrix branch opener, the vector return and the
# scalar else-branch are partially missing below)
1441 return numpy.diag(self.__C)
1442 elif self.isvector():
# msize is required for the scalar form since no dimension is stored.
1444 elif self.isscalar():
1446 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1448 return self.__C * numpy.ones(int(msize))
1449 elif self.isobject():
1450 return self.__C.diag()
1452 def asfullmatrix(self, msize=None):
1456 elif self.isvector():
1457 return numpy.matrix( numpy.diag(self.__C), float )
1458 elif self.isscalar():
1460 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1462 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1463 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1464 return self.__C.asfullmatrix()
1466 def trace(self, msize=None):
1467 "Trace of the matrix"
1469 return numpy.trace(self.__C)
1470 elif self.isvector():
1471 return float(numpy.sum(self.__C))
1472 elif self.isscalar():
1474 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1476 return self.__C * int(msize)
1477 elif self.isobject():
1478 return self.__C.trace()
1484 "x.__repr__() <==> repr(x)"
1485 return repr(self.__C)
1488 "x.__str__() <==> str(x)"
1489 return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    # Full-matrix and object forms delegate to the underlying "+".
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Bug fix: numpy.asarray(other) returns a *view* when "other" is
        # already an ndarray, so the in-place "+=" on the diagonal below
        # used to silently mutate the caller's operand.  Work on a copy.
        _A = numpy.array(other, copy=True)
        # Add the (diagonal) covariance onto the diagonal entries of the
        # flattened 2-D operand: stride shape[1]+1 walks the diagonal.
        _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
        return numpy.asmatrix(_A)
# Arithmetic protocol of Covariance, dispatched on the internal
# representation (matrix / vector-diagonal / scalar / object).
1500 def __radd__(self, other):
1501 "x.__radd__(y) <==> y+x"
# Right-hand addition is deliberately unsupported.
1502 raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
1504 def __sub__(self, other):
1505 "x.__sub__(y) <==> x-y"
1506 if self.ismatrix() or self.isobject():
1507 return self.__C - numpy.asmatrix(other)
1508 elif self.isvector() or self.isscalar():
# NOTE(review): asarray may alias "other" (no copy for an ndarray input),
# so the in-place diagonal rewrite below can mutate the caller's operand.
1509 _A = numpy.asarray(other)
1510 _A.reshape(_A.size)[::_A.shape[1]+1] = self.__C - _A.reshape(_A.size)[::_A.shape[1]+1]
1511 return numpy.asmatrix(_A)
1513 def __rsub__(self, other):
1514 "x.__rsub__(y) <==> y-x"
1515 raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
# (the "def __neg__(self):" line, original ~1517, is elided below)
1518 "x.__neg__() <==> -x"
1521 def __mul__(self, other):
1522 "x.__mul__(y) <==> x*y"
1523 if self.ismatrix() and isinstance(other,numpy.matrix):
1524 return self.__C * other
1525 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1526 if numpy.ravel(other).size == self.shape[1]: # Vector
1527 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1528 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
1529 return self.__C * numpy.asmatrix(other)
1531 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1532 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1533 if numpy.ravel(other).size == self.shape[1]: # Vector
1534 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1535 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
# Diagonal * matrix realized by broadcasting over the transposed rows.
1536 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1538 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1539 elif self.isscalar() and isinstance(other,numpy.matrix):
1540 return self.__C * other
1541 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1542 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1543 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1545 return self.__C * numpy.asmatrix(other)
1546 elif self.isobject():
1547 return self.__C.__mul__(other)
1549 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1551 def __rmul__(self, other):
1552 "x.__rmul__(y) <==> y*x"
1553 if self.ismatrix() and isinstance(other,numpy.matrix):
1554 return other * self.__C
1555 elif self.isvector() and isinstance(other,numpy.matrix):
1556 if numpy.ravel(other).size == self.shape[0]: # Vector
1557 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1558 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrix
1559 return numpy.asmatrix(numpy.array(other) * self.__C)
1561 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1562 elif self.isscalar() and isinstance(other,numpy.matrix):
1563 return other * self.__C
1564 elif self.isobject():
1565 return self.__C.__rmul__(other)
1567 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
# (the "def __len__(self):" line, original ~1569, is elided below)
1570 "x.__len__() <==> len(x)"
1571 return self.shape[0]
1573 # ==============================================================================
1574 class ObserverF(object):
# (docstring — opening/closing quotes elided from this listing)
1576 Creation of an observer function from its text
# NOTE(review): listing elided — the body of func() (presumably an exec of
# self.__corps, original ~1582) and the "def getfunc(self):" line with its
# "return self.func" body (~1583, 1585-1586) are not visible here.
1578 def __init__(self, corps=""):
1579 self.__corps = corps
1580 def func(self,var,info):
1581 "Observation function"
1584 "Restitution of the function pointer in the object"
1587 # ==============================================================================
1588 class CaseLogger(object):
1590 Conservation des commandes de creation d'un cas
1592 def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
1593 self.__name = str(__name)
1594 self.__objname = str(__objname)
1595 self.__logSerie = []
1596 self.__switchoff = False
1598 "TUI":Interfaces._TUIViewer,
1599 "DCT":Interfaces._DCTViewer,
1600 "SCD":Interfaces._SCDViewer,
1601 "YACS":Interfaces._YACSViewer,
1604 "TUI":Interfaces._TUIViewer,
1605 "EPD":Interfaces._EPDViewer,
1606 "DCT":Interfaces._DCTViewer,
1608 if __addViewers is not None:
1609 self.__viewers.update(dict(__addViewers))
1610 if __addLoaders is not None:
1611 self.__loaders.update(dict(__addLoaders))
1613 def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
1614 "Enregistrement d'une commande individuelle"
1615 if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
1616 if "self" in __keys: __keys.remove("self")
1617 self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
1619 self.__switchoff = True
1621 self.__switchoff = False
1623 def dump(self, __filename=None, __format="TUI", __upa=""):
1624 "Restitution normalisée des commandes"
1625 if __format in self.__viewers:
1626 __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
1628 raise ValueError("Dumping as \"%s\" is not available"%__format)
1629 return __formater.dump(__filename, __upa)
1631 def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
1632 "Chargement normalisé des commandes"
1633 if __format in self.__loaders:
1634 __formater = self.__loaders[__format]()
1636 raise ValueError("Loading as \"%s\" is not available"%__format)
1637 return __formater.load(__filename, __content, __object)
1639 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None, # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None, # Simulation of Hm(x) already performed
                   _arg = None, # Extra arguments for Hm, given as a tuple
                   # NOTE(review): this excerpt elides several original lines —
                   # among them the declarations of _BI, _RI, _Xb and _Y that
                   # the body below reads (presumably the inverse background and
                   # observation error covariances and the background and
                   # observation vectors — TODO confirm against the full file),
                   # the closing of this signature, the docstring delimiters and
                   # several else/elif/return branches. Indentation is
                   # reconstructed; verify against the complete source before
                   # any behavioral change.
                   # NOTE(review): _SSC and _SSV use mutable default arguments,
                   # shared across calls — confirm callers always pass them.
                   _SIV = False, # To be phased out for 8.0
                   _SSC = [], # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0, # nbPreviousSteps
                   _QM = "DA", # QualityMeasure
                   _SSV = {}, # Input and/or output : self.StoredVariables
                   _fRt = False, # Whether or not to return the extended output
                   _sSc = True, # Whether or not to store the SSC
    """
    General cost function, useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest, PSO,
    SA, Tabu, DFO, QuantileRegression
    """
    # Make the storage slots usable through "append" even when they are
    # Persistence-like objects exposing "store" instead of "append".
    for k in ["CostFunctionJ",
              "SimulatedObservationAtCurrentOptimum",
              "SimulatedObservationAtCurrentState",
        if hasattr(_SSV[k],"store"):
            _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    # Current state as a column matrix (numpy.matrix API).
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    # Use the already-computed simulation when given, otherwise apply _Hm.
    # NOTE(review): the branch structure between the raise and the call to
    # _Hm is elided here — as shown they cannot both execute.
    if _HmX is not None:
        raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    # Quality-measure evaluation: a NaN in the simulation propagates to J;
    # the corresponding else branch is elided from this view.
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
                # NOTE(review): the guard condition for this raise is elided.
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            raise ValueError("Unknown asked quality measure!")
        J = float( Jb ) + float( Jo )
    # Store the cost function values for this evaluation.
    _SSV["CostFunctionJb"].append( Jb )
    _SSV["CostFunctionJo"].append( Jo )
    _SSV["CostFunctionJ" ].append( J )
    # Track the optimum over the stored history, offset by nbPreviousSteps so
    # earlier runs sharing the same storage are ignored.
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    # NOTE(review): the return logic following this branch is elided.
    if _QM in ["QR"]: # For QuantileRegression
1754 # ==============================================================================
if __name__ == "__main__":
    # Self-check banner, printed only when the module is run directly.
    print("\n AUTODIAGNOSTIC \n")