1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2017 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
"""
    Defines the elementary general-purpose tools.

    This module is intended to be called by AssimilationStudy.
"""
__author__ = "Jean-Philippe ARGAUD"
import os, sys, logging, copy
import numpy
from daCore import Persistence
from daCore import PlatformInfo
from daCore import Templates
37 # ==============================================================================
class CacheManager(object):
    """
    General management of a cache of operator evaluations.

    The cache stores (point, result, point-norm) triples and returns a
    previously computed result when a queried point is close enough to a
    stored one (relative closeness test on the norm of the difference).
    """
    def __init__(self,
                 toleranceInRedundancy = 1.e-18,  # relative closeness tolerance
                 lenghtOfRedundancy    = -1,      # max stored points (-1 = auto-size)
                ):
        """
        The tolerance characteristics can be modified at creation time.
        """
        self.__tolerBP  = float(toleranceInRedundancy)
        self.__lenghtOR = int(lenghtOfRedundancy)
        self.__initlnOR = self.__lenghtOR
        self.clearCache()
        # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)

    def clearCache(self):
        "Empty the cache of previously calculated points."
        # Operator Previous Calculated Points, Results, Point Norms
        self.__listOPCV = []

    def wasCalculatedIn(self, xValue ):
        "Check whether a calculation corresponding to the value already exists."
        __alreadyCalculated = False
        __HxV = None
        # Scan from the most recent entry backwards, limited to the cache length
        for i in range(min(len(self.__listOPCV), self.__lenghtOR) - 1, -1, -1):
            if xValue.size != self.__listOPCV[i][0].size:
                # Different point dimension: cannot match this entry
                continue
            if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
                __alreadyCalculated = True
                __HxV = self.__listOPCV[i][1]
                break
        return __alreadyCalculated, __HxV

    def storeValueInX(self, xValue, HxValue ):
        "Store a calculation corresponding to the value."
        if self.__lenghtOR < 0:
            # Auto-size the cache on first use, from the point dimension
            self.__lenghtOR = 2 * xValue.size + 2
            self.__initlnOR = self.__lenghtOR
        while len(self.__listOPCV) > self.__lenghtOR:
            # Drop the oldest entries to respect the maximum cache length
            self.__listOPCV.pop(0)
        self.__listOPCV.append( (
            copy.copy(numpy.ravel(xValue)),
            copy.copy(HxValue),
            numpy.linalg.norm(xValue),
            ) )

    def disable(self):
        "Deactivate the cache, remembering the configured length for re-enabling."
        self.__initlnOR = self.__lenghtOR
        self.__lenghtOR = 0

    def enable(self):
        "Reactivate the cache with its previously configured length."
        self.__lenghtOR = self.__initlnOR
97 # ==============================================================================
class Operator(object):
    """
    General interface class for a simple operator, given either as a Python
    function/method or as a matrix.
    """
    # Global (class-level) call counters, shared by every Operator instance
    NbCallsAsMatrix = 0
    NbCallsAsMethod = 0
    NbCallsOfCached = 0
    # Shared cache of previously calculated points
    CM = CacheManager()

    def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True):
        """
        Build the operator from one of the two keywords: either a Python
        function, or a matrix.

        - fromMethod : Python function argument
        - fromMatrix : argument suitable for the numpy.matrix constructor
        - avoidingRedundancy : whether redundant calculations are avoided
        """
        self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
        self.__AvoidRC = bool( avoidingRedundancy )
        if fromMethod is not None:
            self.__Method = fromMethod
            self.__Matrix = None
            self.__Type   = "Method"
        elif fromMatrix is not None:
            self.__Method = None
            # NOTE: "float" replaces the "numpy.float" alias removed in NumPy >= 1.24
            self.__Matrix = numpy.matrix( fromMatrix, float )
            self.__Type   = "Matrix"
        else:
            self.__Method = None
            self.__Matrix = None
            self.__Type   = None

    def disableAvoidingRedundancy(self):
        "Deactivate the calculation cache."
        Operator.CM.disable()

    def enableAvoidingRedundancy(self):
        "Reactivate the calculation cache, only if this operator allows it."
        if self.__AvoidRC:
            Operator.CM.enable()
        else:
            Operator.CM.disable()

    def isType(self):
        "Return the internal type of the operator ('Method', 'Matrix' or None)."
        return self.__Type

    def appliedTo(self, xValue, HValue = None):
        """
        Return the result of applying the operator to the argument xValue.
        The argument is assumed to already be of the proper type. When HValue
        is given, it is stored as the precomputed result for xValue.

        - xValue : argument suitable for applying the operator
        - HValue : optional precomputed value of the operator at xValue
        """
        if HValue is not None:
            HxValue = numpy.asmatrix( numpy.ravel( HValue ) ).T
            if self.__AvoidRC:
                Operator.CM.storeValueInX(xValue,HxValue)
        else:
            if self.__AvoidRC:
                __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xValue)
            else:
                __alreadyCalculated = False
            #
            if __alreadyCalculated:
                self.__addOneCacheCall()
                HxValue = __HxV
            else:
                if self.__Matrix is not None:
                    self.__addOneMatrixCall()
                    HxValue = self.__Matrix * xValue
                else:
                    self.__addOneMethodCall()
                    HxValue = self.__Method( xValue )
                if self.__AvoidRC:
                    Operator.CM.storeValueInX(xValue,HxValue)
        #
        return HxValue

    def appliedControledFormTo(self, paire ):
        """
        Return the result of applying the operator to a pair
        (xValue, uValue). The arguments are assumed to already be of the
        proper type. When uValue is None, the operator is assumed to apply
        to xValue only.

        - xValue : X argument suitable for applying the operator
        - uValue : U argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xValue, uValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        elif uValue is not None:
            self.__addOneMethodCall()
            return self.__Method( (xValue, uValue) )
        else:
            self.__addOneMethodCall()
            return self.__Method( xValue )

    def appliedInXTo(self, paire ):
        """
        Return the result of applying the operator to xValue, the operator
        being valid at xNominal. If the operator is linear (a matrix), it is
        valid at any nominal point and the nominal point is not needed.

        Arguments: a pair containing
        - xNominal : point at which the operator was built, to be applied
        - xValue   : argument suitable for applying the operator
        """
        assert len(paire) == 2, "Incorrect number of arguments"
        xNominal, xValue = paire
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix * xValue
        else:
            self.__addOneMethodCall()
            return self.__Method( (xNominal, xValue) )

    def asMatrix(self, ValueForMethodForm = "UnknownVoidValue"):
        """
        Return the operator in matrix form, evaluating the functional form
        at ValueForMethodForm when no matrix is available.
        """
        if self.__Matrix is not None:
            self.__addOneMatrixCall()
            return self.__Matrix
        elif ValueForMethodForm != "UnknownVoidValue": # Do not use "None"; "!=" fixes the identity comparison with a str literal
            self.__addOneMethodCall()
            return numpy.matrix( self.__Method( (ValueForMethodForm, None) ) )
        else:
            raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")

    def shape(self):
        """
        Return the numpy shape if the operator is available in matrix form.
        """
        if self.__Matrix is not None:
            return self.__Matrix.shape
        else:
            raise ValueError("Matrix form of the operator is not available, nor the shape")

    def nbcalls(self, which=None):
        """
        Return the numbers of operator evaluations: the local counters
        (total, as matrix, as method, cached) followed by the same four
        global counters; "which" selects a single entry of this tuple.
        """
        __nbcalls = (
            self.__NbCallsAsMatrix + self.__NbCallsAsMethod,
            self.__NbCallsAsMatrix,
            self.__NbCallsAsMethod,
            self.__NbCallsOfCached,
            Operator.NbCallsAsMatrix + Operator.NbCallsAsMethod,
            Operator.NbCallsAsMatrix,
            Operator.NbCallsAsMethod,
            Operator.NbCallsOfCached,
            )
        if which is None:
            return __nbcalls
        else:
            return __nbcalls[which]

    def __addOneMatrixCall(self):
        "Count one matrix-form evaluation."
        self.__NbCallsAsMatrix += 1   # Local count
        Operator.NbCallsAsMatrix += 1 # Global count

    def __addOneMethodCall(self):
        "Count one method-form evaluation."
        self.__NbCallsAsMethod += 1   # Local count
        Operator.NbCallsAsMethod += 1 # Global count

    def __addOneCacheCall(self):
        "Count one cache hit."
        self.__NbCallsOfCached += 1   # Local count
        Operator.NbCallsOfCached += 1 # Global count
278 # ==============================================================================
# NOTE(review): The lines below are a corrupted rendering of the FullOperator
# class: indentation has been stripped, each line carries the original file's
# line number fused into the content, and structural lines (the __init__
# signature tail, else/try branches, the getO accessor body) are missing.
# The statements are kept byte-for-byte; only comments are added. Restore the
# exact code from upstream before execution.
#
# Purpose (from the visible code): builds the full operator triple
# (Direct, Tangent, Adjoint) from one of several input forms — a single
# function, a dictionary of three functions, a matrix, or a script — and
# normalises finite-difference parameters for approximated derivatives.
279 class FullOperator(object):
281 Classe générale d'interface de type opérateur complet
282 (Direct, Linéaire Tangent, Adjoint)
# Constructor keyword arguments (signature partially missing from this view):
285 name = "GenericFullOperator",
287 asOneFunction = None, # Fonction
288 asThreeFunctions = None, # Dictionnaire de fonctions
290 asDict = None, # Parameters
297 self.__name = str(name)
298 self.__check = bool(toBeChecked)
# Map user-facing parameter names onto the internal "with*" keys used below.
303 if (asDict is not None) and isinstance(asDict, dict):
304 __Parameters.update( asDict )
305 if "DifferentialIncrement" in asDict:
306 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
307 if "CenteredFiniteDifference" in asDict:
308 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
309 if "EnableMultiProcessing" in asDict:
310 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
311 if "NumberOfProcesses" in asDict:
312 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
# Script form: load the matrix or the operator function(s) from a file.
314 if asScript is not None:
315 __Matrix, __Function = None, None
317 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
319 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
320 __Function.update({"useApproximatedDerivatives":True})
321 __Function.update(__Parameters)
322 elif asThreeFunctions:
324 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
325 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
326 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
328 __Function.update(__Parameters)
# In-memory forms: a single Direct function, or a Direct/Tangent/Adjoint dict.
331 if asOneFunction is not None:
332 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
333 if asOneFunction["Direct"] is not None:
334 __Function = asOneFunction
336 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
338 __Function = { "Direct":asOneFunction }
339 __Function.update({"useApproximatedDerivatives":True})
340 __Function.update(__Parameters)
341 elif asThreeFunctions is not None:
342 if isinstance(asThreeFunctions, dict) and \
343 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
344 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
345 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
346 __Function = asThreeFunctions
347 elif isinstance(asThreeFunctions, dict) and \
348 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
349 __Function = asThreeFunctions
350 __Function.update({"useApproximatedDerivatives":True})
352 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# If only Tangent/Adjoint were given, reuse Tangent as the Direct operator.
353 if "Direct" not in asThreeFunctions:
354 __Function["Direct"] = asThreeFunctions["Tangent"]
355 __Function.update(__Parameters)
# Historical Python 2 guard, already commented out in the original:
359 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
360 # for k in ("Direct", "Tangent", "Adjoint"):
361 # if k in __Function and hasattr(__Function[k],"__class__"):
362 # if type(__Function[k]) is type(self.__init__):
363 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalise the optional "applied in X" specification to a dict keyed "HXb".
365 if appliedInX is not None and isinstance(appliedInX, dict):
366 __appliedInX = appliedInX
367 elif appliedInX is not None:
368 __appliedInX = {"HXb":appliedInX}
372 if scheduledBy is not None:
373 self.__T = scheduledBy
# Approximated-derivative branch: fill default FD parameters, then build the
# three Operator objects from an FDApproximation around the Direct function.
375 if isinstance(__Function, dict) and \
376 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
377 ("Direct" in __Function) and (__Function["Direct"] is not None):
378 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
379 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
380 if "withdX" not in __Function: __Function["withdX"] = None
381 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
382 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
383 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
384 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
385 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
386 from daNumerics.ApproximatedDerivatives import FDApproximation
387 FDA = FDApproximation(
388 Function = __Function["Direct"],
389 centeredDF = __Function["withCenteredDF"],
390 increment = __Function["withIncrement"],
391 dX = __Function["withdX"],
392 avoidingRedundancy = __Function["withAvoidingRedundancy"],
393 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
394 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
395 mpEnabled = __Function["withmpEnabled"],
396 mpWorkers = __Function["withmpWorkers"],
398 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC )
399 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC )
400 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC )
# Exact-functions branch: all three operators given explicitly.
401 elif isinstance(__Function, dict) and \
402 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
403 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
404 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC )
405 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC )
406 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC )
# Matrix branch: the tangent is the matrix itself, the adjoint its transpose.
# NOTE(review): "numpy.float" was removed in NumPy >= 1.24; use "float" when
# restoring this code on a modern NumPy.
407 elif asMatrix is not None:
408 __matrice = numpy.matrix( __Matrix, numpy.float )
409 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
410 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC )
411 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC )
414 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Normalise each "AppliedInX" entry into a column numpy.matrix.
416 if __appliedInX is not None:
417 self.__FO["AppliedInX"] = {}
418 if type(__appliedInX) is not dict:
419 raise ValueError("Error: observation operator defined by \"AppliedInX\" need a dictionary as argument.")
420 for key in list(__appliedInX.keys()):
421 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
422 # Pour le cas où l'on a une vraie matrice
423 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
424 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
425 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
426 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
428 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
430 self.__FO["AppliedInX"] = None
# Standard container protocol helpers (bodies partially missing from view).
436 "x.__repr__() <==> repr(x)"
437 return repr(self.__V)
440 "x.__str__() <==> str(x)"
443 # ==============================================================================
# NOTE(review): The lines below are a corrupted rendering of the Algorithm
# class: indentation stripped, original line numbers fused into each line,
# and several structural lines (docstring delimiters, else branches, return
# statements) missing. The statements are kept byte-for-byte; only comments
# are added. Restore the exact code from upstream before execution.
#
# Purpose (from the visible code): base class for elementary assimilation
# algorithms. It provides a dictionary of initialised persistent storage
# variables (StoredVariables), pre/post-run hooks, and controlled access to
# declared required parameters. Subclasses must implement "run".
444 class Algorithm(object):
446 Classe générale d'interface de type algorithme
448 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
449 d'assimilation, en fournissant un container (dictionnaire) de variables
450 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
452 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
# __init__: builds all generic storage variables as Persistence objects.
454 def __init__(self, name):
456 L'initialisation présente permet de fabriquer des variables de stockage
457 disponibles de manière générique dans les algorithmes élémentaires. Ces
458 variables de stockage sont ensuite conservées dans un dictionnaire
459 interne à l'objet, mais auquel on accède par la méthode "get".
461 Les variables prévues sont :
462 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
463 - CostFunctionJb : partie ébauche ou background de la fonction-cout
464 - CostFunctionJo : partie observations de la fonction-cout
465 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
466 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
467 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
468 - CurrentState : état courant lors d'itérations
469 - Analysis : l'analyse Xa
470 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
471 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
472 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
473 - Innovation : l'innovation : d = Y - H(X)
474 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
475 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
476 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
477 - MahalanobisConsistency : indicateur de consistance des covariances
478 - OMA : Observation moins Analysis : Y - Xa
479 - OMB : Observation moins Background : Y - Xb
# NOTE(review): the docstring documents "AMB" but the code below registers a
# "BMA" storage variable — confirm the intended name against upstream.
480 - AMB : Analysis moins Background : Xa - Xb
481 - APosterioriCovariance : matrice A
482 - APosterioriVariances : variances de la matrice A
483 - APosterioriStandardDeviations : écart-types de la matrice A
484 - APosterioriCorrelations : correlations de la matrice A
485 - Residu : dans le cas des algorithmes de vérification
486 On peut rajouter des variables à stocker dans l'initialisation de
487 l'algorithme élémentaire qui va hériter de cette classe
489 logging.debug("%s Initialisation", str(name))
490 self._m = PlatformInfo.SystemUsage()
492 self._name = str( name )
493 self._parameters = {"StoreSupplementaryCalculations":[]}
494 self.__required_parameters = {}
495 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
# Registration of every generic persistent storage variable:
497 self.StoredVariables = {}
498 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
499 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
500 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
501 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
502 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
503 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
504 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
505 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
506 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
507 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
508 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
509 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
510 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
511 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
512 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
513 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
514 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
515 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
516 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
517 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
518 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
519 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
520 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
521 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
522 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
523 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
524 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
525 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
526 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
527 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
528 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
# _pre_run: validates declared required inputs (vectors and covariance
# matrices), normalises "Bounds", and sets optimizer verbosity flags from
# the logging level.
530 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
532 logging.debug("%s Lancement", self._name)
533 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
535 # Mise a jour de self._parameters avec Parameters
536 self.__setParameters(Parameters)
538 # Corrections et complements
# Local helper: check one input vector against mandatory/optional lists.
539 def __test_vvalue( argument, variable, argname):
541 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
542 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
543 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
544 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
546 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
548 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
549 __test_vvalue( Xb, "Xb", "Background or initial state" )
550 __test_vvalue( Y, "Y", "Observation" )
# Local helper: check one error covariance matrix the same way.
551 def __test_cvalue( argument, variable, argname):
553 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
554 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
555 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
556 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
558 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
560 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
561 __test_cvalue( R, "R", "Observation" )
562 __test_cvalue( B, "B", "Background" )
563 __test_cvalue( Q, "Q", "Evolution" )
# Keep "Bounds" only when it is a non-empty list/tuple; otherwise None.
565 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
566 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
568 self._parameters["Bounds"] = None
# Verbose optimizer output when logging is below WARNING, quiet otherwise.
570 if logging.getLogger().level < logging.WARNING:
571 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
572 if PlatformInfo.has_scipy:
573 import scipy.optimize
574 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
576 self._parameters["optmessages"] = 15
578 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
579 if PlatformInfo.has_scipy:
580 import scipy.optimize
581 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
583 self._parameters["optmessages"] = 15
# _post_run: derives variance/std-dev/correlation products from each stored
# a-posteriori covariance, then logs operator call counts and memory usage.
587 def _post_run(self,_oH=None):
589 if ("StoreSupplementaryCalculations" in self._parameters) and \
590 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
591 for _A in self.StoredVariables["APosterioriCovariance"]:
592 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
593 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
594 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
595 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
596 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
597 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
598 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
599 self.StoredVariables["APosterioriCorrelations"].store( _C )
601 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
602 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
603 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
604 logging.debug("%s Terminé", self._name)
# get: returns one stored variable by key, or the whole dictionary when no
# key is given (the selecting if/else lines are missing from this view).
607 def get(self, key=None):
609 Renvoie l'une des variables stockées identifiée par la clé, ou le
610 dictionnaire de l'ensemble des variables disponibles en l'absence de
611 clé. Ce sont directement les variables sous forme objet qui sont
612 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
613 des classes de persistance.
616 return self.StoredVariables[key]
618 return self.StoredVariables
# Dict-like protocol over StoredVariables:
620 def __contains__(self, key=None):
621 "D.__contains__(k) -> True if D has a key k, else False"
622 return key in self.StoredVariables
625 "D.keys() -> list of D's keys"
626 if hasattr(self, "StoredVariables"):
627 return self.StoredVariables.keys()
632 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
633 if hasattr(self, "StoredVariables"):
634 return self.StoredVariables.pop(k, d)
639 raise TypeError("pop expected at least 1 arguments, got 0")
640 "If key is not found, d is returned if given, otherwise KeyError is raised"
# run: abstract entry point every elementary algorithm must override.
646 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
648 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
649 sa forme mathématique la plus naturelle possible.
651 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# defineRequiredParameter: declares a parameter with default/typecast/bounds
# metadata (several dictionary entries are missing from this view).
653 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
655 Permet de définir dans l'algorithme des paramètres requis et leurs
656 caractéristiques par défaut.
659 raise ValueError("A name is mandatory to define a required parameter.")
661 self.__required_parameters[name] = {
663 "typecast" : typecast,
669 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
# getRequiredParameters: names only (noDetails=True) or the full dict.
671 def getRequiredParameters(self, noDetails=True):
673 Renvoie la liste des noms de paramètres requis ou directement le
674 dictionnaire des paramètres requis.
677 return sorted(self.__required_parameters.keys())
679 return self.__required_parameters
# setParameterValue: applies default/typecast, then min/max/list validation.
681 def setParameterValue(self, name=None, value=None):
683 Renvoie la valeur d'un paramètre requis de manière contrôlée
685 default = self.__required_parameters[name]["default"]
686 typecast = self.__required_parameters[name]["typecast"]
687 minval = self.__required_parameters[name]["minval"]
688 maxval = self.__required_parameters[name]["maxval"]
689 listval = self.__required_parameters[name]["listval"]
691 if value is None and default is None:
693 elif value is None and default is not None:
694 if typecast is None: __val = default
695 else: __val = typecast( default )
697 if typecast is None: __val = value
698 else: __val = typecast( value )
700 if minval is not None and (numpy.array(__val, float) < minval).any():
701 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
702 if maxval is not None and (numpy.array(__val, float) > maxval).any():
703 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
704 if listval is not None:
705 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
708 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
709 elif __val not in listval:
710 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
# requireInputArguments: records which named inputs are mandatory/optional,
# consumed by the _pre_run validation helpers above.
713 def requireInputArguments(self, mandatory=(), optional=()):
715 Permet d'imposer des arguments requises en entrée
717 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
718 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# __setParameters: merges a received dict into self._parameters, then
# re-validates every declared required parameter through setParameterValue.
720 def __setParameters(self, fromDico={}):
722 Permet de stocker les paramètres reçus dans le dictionnaire interne.
724 self._parameters.update( fromDico )
725 for k in self.__required_parameters.keys():
726 if k in fromDico.keys():
727 self._parameters[k] = self.setParameterValue(k,fromDico[k])
729 self._parameters[k] = self.setParameterValue(k)
730 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
732 # ==============================================================================
class Diagnostic(object):
    """
    General interface class for diagnostics.

    This template is used as a "pattern" base class, together with one of the
    persistence classes such as "OneScalar".

    An elementary diagnostic class must implement its two methods: the
    internal, non-public "_formula" method, which explicitly and properly
    writes the mathematical formula computing the diagnostic, and the
    external activation method "calculate", which checks and prepares the
    data, activates the former, and stores the results at each step.
    """
    def __init__(self, name = "", parameters = None):
        "Store the diagnostic name and a private copy of its parameters."
        self.name       = str(name)
        # None-sentinel avoids sharing a mutable default dict between calls
        self.parameters = dict( parameters if parameters is not None else {} )

    def _formula(self, *args):
        """
        Must implement the elementary diagnostic operation in its most
        natural mathematical form.
        """
        raise NotImplementedError("Diagnostic mathematical formula has not been implemented!")

    def calculate(self, *args):
        """
        Activate the calculation formula with properly arranged arguments.
        """
        raise NotImplementedError("Diagnostic activation method has not been implemented!")
766 # ==============================================================================
# NOTE(review): The lines below are a corrupted rendering of the
# DiagnosticAndParameters class: indentation stripped, original line numbers
# fused into each line, and structural lines (the __init__ signature tail,
# the get() accessor header, try/else branches) missing. The statements are
# kept byte-for-byte; only comments are added. Restore from upstream before
# execution.
#
# Purpose (from the visible code): resolves a diagnostic specification —
# given directly or loaded from a script — then locates, imports and
# instantiates the matching ElementaryDiagnostic module from the
# "daDiagnostics" directory found on sys.path.
767 class DiagnosticAndParameters(object):
769 Classe générale d'interface d'interface de type diagnostic
# Constructor keyword arguments (signature partially missing from this view):
772 name = "GenericDiagnostic",
779 asExistingDiags = None,
783 self.__name = str(name)
789 self.__E = tuple(asExistingDiags)
790 self.__TheDiag = None
# Script form: read every diagnostic attribute from the script file.
792 if asScript is not None:
793 __Diag = ImportFromScript(asScript).getvalue( "Diagnostic" )
794 __Iden = ImportFromScript(asScript).getvalue( "Identifier" )
795 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
796 __Unit = ImportFromScript(asScript).getvalue( "Unit" )
797 __Base = ImportFromScript(asScript).getvalue( "BaseType" )
799 __Diag = asDiagnostic
800 __Iden = asIdentifier
# Normalise the collected values; the identifier defaults to the diagnostic
# name when not given.
805 if __Diag is not None:
806 self.__D = str(__Diag)
807 if __Iden is not None:
808 self.__I = str(__Iden)
810 self.__I = str(__Diag)
811 if __Dict is not None:
812 self.__P.update( dict(__Dict) )
813 if __Unit is None or __Unit == "None":
815 if __Base is None or __Base == "None":
818 self.__setDiagnostic( self.__D, self.__I, self.__U, self.__B, self.__P, self.__E )
# Accessor returning the instantiated diagnostic (its def line is missing).
822 return self.__TheDiag
# NOTE(review): mutable default "__parameters = {}" below is shared between
# calls — harmless if never mutated, but worth confirming upstream.
824 def __setDiagnostic(self, __choice = None, __name = "", __unit = "", __basetype = None, __parameters = {}, __existings = () ):
826 Permet de sélectionner un diagnostic a effectuer
829 raise ValueError("Error: diagnostic choice has to be given")
830 __daDirectory = "daDiagnostics"
832 # Recherche explicitement le fichier complet
833 # ------------------------------------------
# Search sys.path for "<dir>/daDiagnostics/<choice>.py".
835 for directory in sys.path:
836 if os.path.isfile(os.path.join(directory, __daDirectory, str(__choice)+'.py')):
837 __module_path = os.path.abspath(os.path.join(directory, __daDirectory))
838 if __module_path is None:
839 raise ImportError("No diagnostic module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(__choice, __daDirectory, sys.path))
841 # Importe le fichier complet comme un module
842 # ------------------------------------------
# Temporarily prepend the module path to sys.path for the import, then
# restore sys.path (the enclosing try line is missing from this view).
844 __sys_path_tmp = sys.path ; sys.path.insert(0,__module_path)
845 self.__diagnosticFile = __import__(str(__choice), globals(), locals(), [])
846 sys.path = __sys_path_tmp ; del __sys_path_tmp
847 except ImportError as e:
848 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(__choice,e))
850 # Instancie un objet du type élémentaire du fichier
851 # -------------------------------------------------
851 # (duplicate-name guard, then instantiation of the elementary diagnostic)
852 if __name in __existings:
853 raise ValueError("A default input with the same name \"%s\" already exists."%str(__name))
855 self.__TheDiag = self.__diagnosticFile.ElementaryDiagnostic(
858 basetype = __basetype,
859 parameters = __parameters )
862 # ==============================================================================
863 class AlgorithmAndParameters(object):
# Generic action interface holding a chosen assimilation algorithm and
# its parameters, and driving a run (Python or YACS execution scheme).
865 Classe générale d'interface d'action pour l'algorithme et ses paramètres
868 name = "GenericAlgorithm",
# __init__ (several signature lines elided in this excerpt): records
# the name, initialises the internal algorithm storage, merges the
# parameters, and selects the algorithm when one is given.
875 self.__name = str(name)
879 self.__algorithm = {}
880 self.__algorithmFile = None
881 self.__algorithmName = None
883 self.updateParameters( asDict, asScript )
885 if asScript is not None:
886 __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
890 if __Algo is not None:
891 self.__A = str(__Algo)
# The algorithm name is also recorded inside the parameter dict.
892 self.__P.update( {"Algorithm":self.__A} )
894 self.__setAlgorithm( self.__A )
896 def updateParameters(self,
# Merge new parameters into the internal parameter store; a script
# source (asScript) takes precedence over the plain dictionary (asDict).
# NOTE(review): the default-argument lines of the signature are elided
# in this excerpt.
900 "Update of the parameters"
901 if asScript is not None:
902 __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
906 if __Dict is not None:
907 self.__P.update( dict(__Dict) )
909 def executePythonScheme(self, asDictAO = None):
910 "Launches the assimilation computation"
# Unpack the problem objects from the given dictionary (each entry may
# wrap its value behind a "getO" accessor), validate all shapes, and
# run the selected algorithm.
911 Operator.CM.clearCache()
913 if not isinstance(asDictAO, dict):
914 raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
# Background: taken from "Background" or, for checking algorithms,
# from "CheckingPoint".
915 if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
916 elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
917 else: self.__Xb = None
918 if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
919 else: self.__Y = asDictAO["Observation"]
920 if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
921 else: self.__U = asDictAO["ControlInput"]
922 if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
923 else: self.__HO = asDictAO["ObservationOperator"]
924 if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
925 else: self.__EM = asDictAO["EvolutionModel"]
926 if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
927 else: self.__CM = asDictAO["ControlModel"]
# Error covariances are taken as-is.
928 self.__B = asDictAO["BackgroundError"]
929 self.__R = asDictAO["ObservationError"]
930 self.__Q = asDictAO["EvolutionError"]
# Consistency check of all sizes before running the algorithm.
932 self.__shape_validate()
934 self.__algorithm.run(
# NOTE(review): most keyword arguments of the run() call are elided in
# this excerpt.
944 Parameters = self.__P,
948 def executeYACSScheme(self, FileName=None):
949 "Launches the assimilation computation"
# Convert the given DIC Python file into a YACS XML scheme through the
# ADAO schema creator, then load, check and execute it within SALOME.
950 if FileName is None or not os.path.exists(FileName):
951 raise ValueError("an existing DIC Python file name has to be given for YACS execution.\n")
952 if not PlatformInfo.has_salome or not PlatformInfo.has_yacs or not PlatformInfo.has_adao:
953 raise ImportError("Unable to get SALOME, YACS or ADAO environnement variables. Please launch SALOME before executing.\n")
# Generate the XML scheme next to the input file, via a subprocess.
955 __converterExe = os.path.join(os.environ["ADAO_ROOT_DIR"], "bin/salome", "AdaoYacsSchemaCreator.py")
956 __inputFile = os.path.abspath(FileName)
957 __outputFile = __inputFile[:__inputFile.rfind(".")] + '.xml'
959 __args = ["python", __converterExe, __inputFile, __outputFile]
961 __p = subprocess.Popen(__args)
962 (__stdoutdata, __stderrdata) = __p.communicate()
963 if not os.path.exists(__outputFile):
964 __msg = "An error occured during the execution of the ADAO YACS Schema\n"
965 __msg += "Creator applied on the input file:\n"
966 __msg += " %s\n"%__inputFile
967 __msg += "If SALOME GUI is launched by command line, see errors\n"
968 __msg += "details in your terminal.\n"
969 raise ValueError(__msg)
# Load the generated scheme with the YACS runtime.  NOTE(review): the
# import and try lines are elided in this excerpt.
975 SALOMERuntime.RuntimeSALOME_setRuntime()
977 r = pilot.getRuntime()
978 xmlLoader = loader.YACSLoader()
979 xmlLoader.registerProcCataLoader()
981 catalogAd = r.loadCatalog("proc", __outputFile)
984 r.addCatalog(catalogAd)
987 p = xmlLoader.load(__outputFile)
988 except IOError as ex:
989 print("IO exception: %s"%(ex,))
# Report parser errors, scheme validity and consistency problems before
# executing.
991 logger = p.getLogger("parser")
992 if not logger.isEmpty():
993 print("The imported file has errors :")
994 print(logger.getStr())
997 print("Le schéma n'est pas valide et ne peut pas être exécuté")
998 print(p.getErrorReport())
1000 info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
1001 p.checkConsistency(info)
1002 if info.areWarningsOrErrors():
1003 print("Le schéma n'est pas cohérent et ne peut pas être exécuté")
1004 print(info.getGlobalRepr())
# Execute the scheme and fail loudly on a non-DONE final state.
1006 e = pilot.ExecutorSwig()
1008 if p.getEffectiveState() != pilot.DONE:
1009 print(p.getErrorReport())
1011 raise ValueError("execution error of YACS scheme")
1015 def get(self, key = None):
1016 "Checks the existence of a variable or parameter key and returns its value"
# Lookup order: algorithm stored variables first, then parameters.
# NOTE(review): the final fallback branch is elided in this excerpt.
1017 if key in self.__algorithm:
1018 return self.__algorithm.get( key )
1019 elif key in self.__P:
1020 return self.__P[key]
def pop(self, k, d):
    """Remove key *k* from the algorithm storage and return its value,
    or *d* when the key is absent.  Required for the pickling protocol."""
    storage = self.__algorithm
    return storage.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    """Return the parameters required by the selected algorithm,
    delegating to its own getRequiredParameters method."""
    algorithm = self.__algorithm
    return algorithm.getRequiredParameters(noDetails)
1032 def setObserver(self, __V, __O, __I, __S):
# Attach observer function __O (with hook parameters __I) to the stored
# variable named __V.  Raises ValueError when no algorithm has been
# chosen yet or the variable does not exist.  NOTE(review): some
# keyword-argument lines of the setDataObserver call (presumably using
# __S for scheduling) are elided in this excerpt — confirm in full
# source.
1033 if self.__algorithm is None \
1034 or isinstance(self.__algorithm, dict) \
1035 or not hasattr(self.__algorithm,"StoredVariables"):
1036 raise ValueError("No observer can be build before choosing an algorithm.")
1037 if __V not in self.__algorithm:
1038 raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
1040 self.__algorithm.StoredVariables[ __V ].setDataObserver(
1043 HookParameters = __I,
1046 def removeObserver(self, __V, __O, __A = False):
# Detach observer function __O from the stored variable named __V.
# NOTE(review): the trailing arguments of the removeDataObserver call
# (presumably forwarding __O and __A) are elided in this excerpt —
# confirm in full source.
1047 if self.__algorithm is None \
1048 or isinstance(self.__algorithm, dict) \
1049 or not hasattr(self.__algorithm,"StoredVariables"):
1050 raise ValueError("No observer can be removed before choosing an algorithm.")
1051 if __V not in self.__algorithm:
1052 raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
1054 return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
1059 def hasObserver(self, __V):
# True when the stored variable named __V currently has a data
# observer.  NOTE(review): the early "return False" lines for a missing
# algorithm or unknown variable are elided in this excerpt.
1060 if self.__algorithm is None \
1061 or isinstance(self.__algorithm, dict) \
1062 or not hasattr(self.__algorithm,"StoredVariables"):
1064 if __V not in self.__algorithm:
1066 return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
# NOTE(review): the following line is the body of a keys() method whose
# "def" line is elided in this excerpt; it exposes both the algorithm
# variables and the parameter names.
1069 return list(self.__algorithm.keys()) + list(self.__P.keys())
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key is known when it lives in either the algorithm storage or
    # the parameter dictionary (short-circuits like the original "or").
    return any(key in store for store in (self.__algorithm, self.__P))
# NOTE(review): the "def __repr__(self):" line is elided in this
# excerpt; it renders the algorithm name then the parameters.
1076 "x.__repr__() <==> repr(x)"
1077 return repr(self.__A)+", "+repr(self.__P)
# NOTE(review): the "def __str__(self):" line is elided in this excerpt.
1080 "x.__str__() <==> str(x)"
1081 return str(self.__A)+", "+str(self.__P)
1083 def __setAlgorithm(self, choice = None ):
# Select the algorithm module named *choice* in a "daAlgorithms"
# subdirectory of sys.path, import it, and instantiate its
# "ElementaryAlgorithm" class.  The choice is definitive: a second call
# raises ValueError.
1085 Permet de sélectionner l'algorithme à utiliser pour mener à bien l'étude
1086 d'assimilation. L'argument est un champ caractère se rapportant au nom
1087 d'un fichier contenu dans "../daAlgorithms" et réalisant l'opération
1088 d'assimilation sur les arguments fixes.
1091 raise ValueError("Error: algorithm choice has to be given")
1092 if self.__algorithmName is not None:
1093 raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
1094 daDirectory = "daAlgorithms"
1096 # Explicitly search for the complete module file
1097 # ------------------------------------------
1099 for directory in sys.path:
1100 if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
1101 module_path = os.path.abspath(os.path.join(directory, daDirectory))
1102 if module_path is None:
1103 raise ImportError("No algorithm module named \"%s\" was found in a \"%s\" subdirectory\n The search path is %s"%(choice, daDirectory, sys.path))
1105 # Import the complete file as a module
1106 # ------------------------------------------
# sys.path is temporarily patched around the import, then restored.
1108 sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
1109 self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
1110 self.__algorithmName = str(choice)
1111 sys.path = sys_path_tmp ; del sys_path_tmp
1112 except ImportError as e:
1113 raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
1115 # Instantiate an object of the file's elementary type
1116 # -------------------------------------------------
1117 self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
1120 def __shape_validate(self):
# Validate that the sizes of the vectors (Xb, Y, U), the covariance
# matrices (B, R, Q) and the operators (HO, EM, CM) are mutually
# compatible before running.  Raises TypeError when a shape cannot be
# determined, ValueError on any inconsistency.
1122 Validation de la correspondance correcte des tailles des variables et
1123 des matrices s'il y en a.
# Each object may expose "size", a "shape" tuple or a callable
# "shape()"; None (or empty) objects are given a zero shape.
1125 if self.__Xb is None: __Xb_shape = (0,)
1126 elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
1127 elif hasattr(self.__Xb,"shape"):
1128 if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
1129 else: __Xb_shape = self.__Xb.shape()
1130 else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
1132 if self.__Y is None: __Y_shape = (0,)
1133 elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
1134 elif hasattr(self.__Y,"shape"):
1135 if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
1136 else: __Y_shape = self.__Y.shape()
1137 else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
1139 if self.__U is None: __U_shape = (0,)
1140 elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
1141 elif hasattr(self.__U,"shape"):
1142 if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
1143 else: __U_shape = self.__U.shape()
1144 else: raise TypeError("The control (U) has no attribute of shape: problem !")
1146 if self.__B is None: __B_shape = (0,0)
1147 elif hasattr(self.__B,"shape"):
1148 if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
1149 else: __B_shape = self.__B.shape()
1150 else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
1152 if self.__R is None: __R_shape = (0,0)
1153 elif hasattr(self.__R,"shape"):
1154 if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
1155 else: __R_shape = self.__R.shape()
1156 else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
1158 if self.__Q is None: __Q_shape = (0,0)
1159 elif hasattr(self.__Q,"shape"):
1160 if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
1161 else: __Q_shape = self.__Q.shape()
1162 else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
# Operators are stored as dicts with a "Direct" entry; empty or plain
# dict forms get a zero shape.
1164 if len(self.__HO) == 0: __HO_shape = (0,0)
1165 elif isinstance(self.__HO, dict): __HO_shape = (0,0)
1166 elif hasattr(self.__HO["Direct"],"shape"):
1167 if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
1168 else: __HO_shape = self.__HO["Direct"].shape()
1169 else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
1171 if len(self.__EM) == 0: __EM_shape = (0,0)
1172 elif isinstance(self.__EM, dict): __EM_shape = (0,0)
1173 elif hasattr(self.__EM["Direct"],"shape"):
1174 if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
1175 else: __EM_shape = self.__EM["Direct"].shape()
1176 else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
1178 if len(self.__CM) == 0: __CM_shape = (0,0)
1179 elif isinstance(self.__CM, dict): __CM_shape = (0,0)
1180 elif hasattr(self.__CM["Direct"],"shape"):
1181 if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
1182 else: __CM_shape = self.__CM["Direct"].shape()
1183 else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
1185 # Checking the conditions
1186 # ---------------------------
# Xb and Y must be vectors (1D, or 2D with a singleton dimension).
1187 if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
1188 raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
1189 if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
1190 raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
# Covariances and the evolution operator must be square.
1192 if not( min(__B_shape) == max(__B_shape) ):
1193 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
1194 if not( min(__R_shape) == max(__R_shape) ):
1195 raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
1196 if not( min(__Q_shape) == max(__Q_shape) ):
1197 raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
1198 if not( min(__EM_shape) == max(__EM_shape) ):
1199 raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
# Cross-compatibility of the observation operator with X, Y, B, R.
1201 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[1] == max(__Xb_shape) ):
1202 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
1203 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and not( __HO_shape[0] == max(__Y_shape) ):
1204 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
1205 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
1206 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
1207 if len(self.__HO) > 0 and not(type(self.__HO) is type({})) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
1208 raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
# B/Xb mismatch: for EnsembleBlue the background is reshaped into a
# persistent series of member vectors instead of failing.
1210 if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
1211 if self.__algorithmName in ["EnsembleBlue",]:
1212 asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
1213 self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
1214 for member in asPersistentVector:
1215 self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
1216 __Xb_shape = min(__B_shape)
# NOTE(review): the "else:" line preceding this raise is elided in
# this excerpt.
1218 raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
1220 if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
1221 raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
1223 if self.__EM is not None and len(self.__EM) > 0 and not(type(self.__EM) is type({})) and not( __EM_shape[1] == max(__Xb_shape) ):
1224 raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
1226 if self.__CM is not None and len(self.__CM) > 0 and not(type(self.__CM) is type({})) and not( __CM_shape[1] == max(__U_shape) ):
1227 raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
# Optional "Bounds" parameter must provide one pair per state component.
1229 if ("Bounds" in self.__P) \
1230 and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
1231 and (len(self.__P["Bounds"]) != max(__Xb_shape)):
1232 raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
1233 %(len(self.__P["Bounds"]),max(__Xb_shape)))
1237 # ==============================================================================
1238 class DataObserver(object):
# Generic observer interface: binds an observer callable (given as an
# object, a source string, a template name, or a script) to one or
# several stored variables of an algorithm.
1240 Classe générale d'interface de type observer
1243 name = "GenericObserver",
# __init__ (several signature lines elided in this excerpt).
1255 self.__name = str(name)
# Normalise the observed variable name(s) into tuple self.__V with a
# matching tuple of info strings self.__I.
1260 if onVariable is None:
1261 raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
1262 elif type(onVariable) in (tuple, list):
1263 self.__V = tuple(map( str, onVariable ))
1264 if withInfo is None:
1267 self.__I = (str(withInfo),)*len(self.__V)
1268 elif isinstance(onVariable, str):
1269 self.__V = (onVariable,)
1270 if withInfo is None:
1271 self.__I = (onVariable,)
1273 self.__I = (str(withInfo),)
1275 raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
# Observer source, in priority order: raw string, registered template,
# then script file; an explicit observer object wins over the compiled
# function.
1277 if asString is not None:
1278 __FunctionText = asString
1279 elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
1280 __FunctionText = Templates.ObserverTemplates[asTemplate]
1281 elif asScript is not None:
1282 __FunctionText = ImportFromScript(asScript).getstring()
1285 __Function = ObserverF(__FunctionText)
1287 if asObsObject is not None:
1288 self.__O = asObsObject
1290 self.__O = __Function.getfunc()
# Register the observer on every requested variable of the algorithm.
1292 for k in range(len(self.__V)):
1295 if ename not in withAlgo:
1296 raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
1298 withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)
# NOTE(review): the "def __repr__"/"def __str__" lines are elided in
# this excerpt.
1301 "x.__repr__() <==> repr(x)"
1302 return repr(self.__V)+"\n"+repr(self.__O)
1305 "x.__str__() <==> str(x)"
1306 return str(self.__V)+"\n"+str(self.__O)
1308 # ==============================================================================
1309 class State(object):
# Generic state-vector interface: wraps either a single vector or a
# series of vectors (Persistence object), read directly or from a
# script.
1311 Classe générale d'interface de type état
1314 name = "GenericVector",
1316 asPersistentVector = None,
1319 toBeChecked = False,
1322 Permet de définir un vecteur :
1323 - asVector : entrée des données, comme un vecteur compatible avec le
1324 constructeur de numpy.matrix, ou "True" si entrée par script.
1325 - asPersistentVector : entrée des données, comme une série de vecteurs
1326 compatible avec le constructeur de numpy.matrix, ou comme un objet de
1327 type Persistence, ou "True" si entrée par script.
1328 - asScript : si un script valide est donné contenant une variable
1329 nommée "name", la variable est de type "asVector" (par défaut) ou
1330 "asPersistentVector" selon que l'une de ces variables est placée à
1333 self.__name = str(name)
1334 self.__check = bool(toBeChecked)
1338 self.__is_vector = False
1339 self.__is_series = False
1341 if asScript is not None:
1342 __Vector, __Series = None, None
1343 if asPersistentVector:
1344 __Series = ImportFromScript(asScript).getvalue( self.__name )
1346 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1348 __Vector, __Series = asVector, asPersistentVector
# Single vector: stored as a numpy column matrix.
# NOTE(review): numpy.float is deprecated in modern NumPy; consider
# plain "float" when updating.
1350 if __Vector is not None:
1351 self.__is_vector = True
1352 self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
1353 self.shape = self.__V.shape
1354 self.size = self.__V.size
# Series of vectors: stored as a Persistence.OneVector of column
# matrices; shape/size then describe one member.
1355 elif __Series is not None:
1356 self.__is_series = True
1357 if type(__Series) in (tuple, list, numpy.ndarray, numpy.matrix):
1358 self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
1359 for member in __Series:
1360 self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
1361 import sys ; sys.stdout.flush()
1364 if type(self.__V.shape) in (tuple, list):
1365 self.shape = self.__V.shape
1367 self.shape = self.__V.shape()
1368 if len(self.shape) == 1:
1369 self.shape = (self.shape[0],1)
1370 self.size = self.shape[0] * self.shape[1]
1372 raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
1374 if scheduledBy is not None:
1375 self.__T = scheduledBy
1377 def getO(self, withScheduler=False):
# Return the stored object, with the scheduler when requested.
# NOTE(review): some branches of this accessor are elided in this
# excerpt.
1379 return self.__V, self.__T
1380 elif self.__T is None:
# NOTE(review): the "def" lines of the small accessors below are elided
# in this excerpt.
1386 "Internal type check"
1387 return self.__is_vector
1390 "Internal type check"
1391 return self.__is_series
1394 "x.__repr__() <==> repr(x)"
1395 return repr(self.__V)
1398 "x.__str__() <==> str(x)"
1399 return str(self.__V)
1401 # ==============================================================================
1402 class Covariance(object):
# Generic covariance-matrix interface supporting four storage modes:
# full matrix, diagonal vector, scalar multiple of the identity, and an
# arbitrary user object exposing a matrix-like protocol.
1404 Classe générale d'interface de type covariance
1407 name = "GenericCovariance",
1408 asCovariance = None,
1409 asEyeByScalar = None,
1410 asEyeByVector = None,
1413 toBeChecked = False,
1416 Permet de définir une covariance :
1417 - asCovariance : entrée des données, comme une matrice compatible avec
1418 le constructeur de numpy.matrix
1419 - asEyeByScalar : entrée des données comme un seul scalaire de variance,
1420 multiplicatif d'une matrice de corrélation identité, aucune matrice
1421 n'étant donc explicitement à donner
1422 - asEyeByVector : entrée des données comme un seul vecteur de variance,
1423 à mettre sur la diagonale d'une matrice de corrélation, aucune matrice
1424 n'étant donc explicitement à donner
1425 - asCovObject : entrée des données comme un objet python, qui a les
1426 methodes obligatoires "getT", "getI", "diag", "trace", "__add__",
1427 "__sub__", "__neg__", "__mul__", "__rmul__" et facultatives "shape",
1428 "size", "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
1429 - toBeChecked : booléen indiquant si le caractère SDP de la matrice
1430 pleine doit être vérifié
1432 self.__name = str(name)
1433 self.__check = bool(toBeChecked)
# Exactly one of the four storage-mode flags ends up True.
1436 self.__is_scalar = False
1437 self.__is_vector = False
1438 self.__is_matrix = False
1439 self.__is_object = False
1441 if asScript is not None:
1442 __Matrix, __Scalar, __Vector, __Object = None, None, None, None
# NOTE(review): the branch conditions choosing which kind of input to
# read from the script are elided in this excerpt.
1444 __Scalar = ImportFromScript(asScript).getvalue( self.__name )
1446 __Vector = ImportFromScript(asScript).getvalue( self.__name )
1448 __Object = ImportFromScript(asScript).getvalue( self.__name )
1450 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
1452 __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
# Scalar mode: one positive variance multiplying the identity.
1454 if __Scalar is not None:
1455 if numpy.matrix(__Scalar).size != 1:
1456 raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
1457 self.__is_scalar = True
1458 self.__C = numpy.abs( float(__Scalar) )
# Vector mode: the diagonal of the covariance.
1461 elif __Vector is not None:
1462 self.__is_vector = True
1463 self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
1464 self.shape = (self.__C.size,self.__C.size)
1465 self.size = self.__C.size**2
# Matrix mode: the full covariance matrix.
1466 elif __Matrix is not None:
1467 self.__is_matrix = True
1468 self.__C = numpy.matrix( __Matrix, float )
1469 self.shape = self.__C.shape
1470 self.size = self.__C.size
# Object mode: a user object that must provide the matrix protocol.
1471 elif __Object is not None:
1472 self.__is_object = True
1474 for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
1475 if not hasattr(self.__C,at):
1476 raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
1477 if hasattr(self.__C,"shape"):
1478 self.shape = self.__C.shape
1481 if hasattr(self.__C,"size"):
1482 self.size = self.__C.size
1487 # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
1491 def __validate(self):
# Check squareness and positive-definiteness of the stored covariance
# according to its storage mode.  The full Cholesky check is only run
# when requested (toBeChecked) or below WARNING logging level.
1493 if self.ismatrix() and min(self.shape) != max(self.shape):
1494 raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
1495 if self.isobject() and min(self.shape) != max(self.shape):
1496 raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
1497 if self.isscalar() and self.__C <= 0:
1498 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
1499 if self.isvector() and (self.__C <= 0).any():
1500 raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
1501 if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
# NOTE(review): the try/except lines around the two Cholesky attempts
# are elided in this excerpt.
1503 L = numpy.linalg.cholesky( self.__C )
1505 raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
1506 if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
1508 L = self.__C.cholesky()
1510 raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
# NOTE(review): the "def isscalar/isvector/ismatrix/isobject" lines are
# elided in this excerpt; each predicate reports the storage mode.
1513 "Internal type check"
1514 return self.__is_scalar
1517 "Internal type check"
1518 return self.__is_vector
1521 "Internal type check"
1522 return self.__is_matrix
1525 "Internal type check"
1526 return self.__is_object
# getI (def line elided in this excerpt): inverse of the covariance,
# wrapped in a new Covariance of the same storage mode.
1531 return Covariance(self.__name+"I", asCovariance = self.__C.I )
1532 elif self.isvector():
1533 return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
1534 elif self.isscalar():
1535 return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
1536 elif self.isobject():
1537 return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
1539 return None # Indispensable
# getT (def line elided in this excerpt): transpose; diagonal and
# scalar forms are symmetric, hence returned unchanged.
1544 return Covariance(self.__name+"T", asCovariance = self.__C.T )
1545 elif self.isvector():
1546 return Covariance(self.__name+"T", asEyeByVector = self.__C )
1547 elif self.isscalar():
1548 return Covariance(self.__name+"T", asEyeByScalar = self.__C )
1549 elif self.isobject():
1550 return Covariance(self.__name+"T", asCovObject = self.__C.getT() )
# cholesky (def line elided in this excerpt): Cholesky factor of the
# covariance, mode by mode (square root of the diagonal/scalar forms).
1553 "Cholesky decomposition"
1555 return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
1556 elif self.isvector():
1557 return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
1558 elif self.isscalar():
1559 return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
1560 elif self.isobject() and hasattr(self.__C,"cholesky"):
1561 return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )
1563 def choleskyI(self):
1564 "Inverse of the Cholesky decomposition"
# NOTE(review): the "if self.ismatrix():" guard line is elided in this
# excerpt.
1566 return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
1567 elif self.isvector():
1568 return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
1569 elif self.isscalar():
1570 return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
1571 elif self.isobject() and hasattr(self.__C,"choleskyI"):
1572 return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )
1574 def diag(self, msize=None):
1575 "Diagonal of the matrix"
# Scalar mode needs the target size *msize* since no matrix is stored.
# NOTE(review): several branch lines are elided in this excerpt.
1577 return numpy.diag(self.__C)
1578 elif self.isvector():
1580 elif self.isscalar():
1582 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1584 return self.__C * numpy.ones(int(msize))
1585 elif self.isobject():
1586 return self.__C.diag()
1588 def asfullmatrix(self, msize=None):
# Return the covariance expanded into a dense numpy matrix; scalar mode
# needs *msize*.  NOTE(review): several branch lines are elided in this
# excerpt.
1592 elif self.isvector():
1593 return numpy.matrix( numpy.diag(self.__C), float )
1594 elif self.isscalar():
1596 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1598 return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
1599 elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
1600 return self.__C.asfullmatrix()
1602 def trace(self, msize=None):
1603 "Trace of the matrix"
# Scalar mode needs *msize* to scale the single variance.
1605 return numpy.trace(self.__C)
1606 elif self.isvector():
1607 return float(numpy.sum(self.__C))
1608 elif self.isscalar():
1610 raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
1612 return self.__C * int(msize)
1613 elif self.isobject():
1614 return self.__C.trace()
# NOTE(review): the "def __repr__"/"def __str__" lines are elided in
# this excerpt; both render the underlying stored covariance data.
1620 "x.__repr__() <==> repr(x)"
1621 return repr(self.__C)
1624 "x.__str__() <==> str(x)"
1625 return str(self.__C)
def __add__(self, other):
    """x.__add__(y) <==> x+y

    Return the sum of this covariance and *other* as a numpy matrix.
    For vector/scalar storage, only the diagonal of *other* is updated.
    """
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Copy the operand first: numpy.asarray would alias an ndarray
        # argument, and the in-place "+=" on the diagonal below would
        # then silently mutate the caller's data — an addition operator
        # must never do that.
        _A = numpy.array( other )
        # Add the stored variance(s) onto the diagonal of the 2D operand
        # through a strided flat view.
        _A.reshape(_A.size)[::_A.shape[1]+1] += self.__C
        return numpy.asmatrix(_A)
def __radd__(self, other):
    """x.__radd__(y) <==> y+x -- not supported, always raises."""
    message = "%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
def __sub__(self, other):
    """x.__sub__(y) <==> x-y

    Return the difference of this covariance and *other* as a numpy
    matrix.  For vector/scalar storage, only the diagonal is affected:
    it becomes (variance - operand diagonal).
    """
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Copy the operand first: numpy.asarray would alias an ndarray
        # argument, and the diagonal assignment below would then
        # silently mutate the caller's data.
        _A = numpy.array( other )
        _flat = _A.reshape(_A.size)
        _flat[::_A.shape[1]+1] = self.__C - _flat[::_A.shape[1]+1]
        return numpy.asmatrix(_A)
def __rsub__(self, other):
    """x.__rsub__(y) <==> y-x -- not supported, always raises."""
    message = "%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other))
    raise NotImplementedError(message)
# NOTE(review): "def __neg__(self):" and its body line are elided in
# this excerpt.
1654 "x.__neg__() <==> -x"
1657 def __mul__(self, other):
1658 "x.__mul__(y) <==> x*y"
# Left multiplication, dispatching on the storage mode and on whether
# *other* acts as a vector (ravel size matches columns) or a matrix
# (row count matches columns).  NOTE(review): some "else:" lines
# preceding the raises are elided in this excerpt.
1659 if self.ismatrix() and isinstance(other,numpy.matrix):
1660 return self.__C * other
1661 elif self.ismatrix() and (isinstance(other,numpy.ndarray) \
1662 or isinstance(other,list) \
1663 or isinstance(other,tuple)):
1664 if numpy.ravel(other).size == self.shape[1]: # Vector
1665 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1666 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
1667 return self.__C * numpy.asmatrix(other)
1669 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1670 elif self.isvector() and (isinstance(other,numpy.matrix) \
1671 or isinstance(other,numpy.ndarray) \
1672 or isinstance(other,list) \
1673 or isinstance(other,tuple)):
1674 if numpy.ravel(other).size == self.shape[1]: # Vector
1675 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1676 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrix
1677 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1679 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1680 elif self.isscalar() and isinstance(other,numpy.matrix):
1681 return self.__C * other
1682 elif self.isscalar() and (isinstance(other,numpy.ndarray) \
1683 or isinstance(other,list) \
1684 or isinstance(other,tuple)):
1685 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1686 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1688 return self.__C * numpy.asmatrix(other)
1689 elif self.isobject():
1690 return self.__C.__mul__(other)
1692 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1694 def __rmul__(self, other):
1695 "x.__rmul__(y) <==> y*x"
1696 if self.ismatrix() and isinstance(other,numpy.matrix):
1697 return other * self.__C
1698 elif self.isvector() and isinstance(other,numpy.matrix):
1699 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1700 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1701 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1702 return numpy.asmatrix(numpy.array(other) * self.__C)
1704 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1705 elif self.isscalar() and isinstance(other,numpy.matrix):
1706 return other * self.__C
1707 elif self.isobject():
1708 return self.__C.__rmul__(other)
1710 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1713 "x.__len__() <==> len(x)"
1714 return self.shape[0]
1716 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function from its source text.
    """
    def __init__(self, corps=""):
        # `corps` holds the Python source text of the observer body.
        self.__corps = corps
    def func(self,var,info):
        "Observation function: executes the stored source text"
        # SECURITY NOTE: exec() runs arbitrary code; `corps` must come from
        # a trusted source (here, user-authored study scripts).
        exec(self.__corps)
    def getfunc(self):
        "Return the function pointer held by the object"
        return self.func
1730 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands used to build a case.
    """
    def __init__(self, __name="", __objname="case", __viewers={}, __loaders={}):
        "Initialize the logger and the registered viewers/loaders"
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []
        self.__switchoff = False
        # Use two distinct dictionaries: binding both names to a single
        # shared dict (as before) made every viewer registered through
        # `__viewers` silently become a loader too, and vice versa.
        self.__viewers = {"TUI":_TUIViewer}
        self.__loaders = {"TUI":_TUIViewer}
        self.__viewers.update(__viewers)
        self.__loaders.update(__loaders)
    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            # A switched-off command suspends recording until a normal
            # command re-enables it below.
            if __switchoff:
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False
    def dump(self, __filename=None, __format="TUI"):
        "Normalized restitution of the commands (through *GenericCaseViewer)"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename)
    def load(self, __filename=None, __format="TUI"):
        "Normalized loading of the commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename)
1770 # ==============================================================================
class GenericCaseViewer(object):
    """
    Base class managing the commands used to create a case view.

    Subclasses implement the format-specific _append/_extract/_interpret.
    """
    def __init__(self, __name="", __objname="case", __content=None):
        "Initialisation and header registration"
        self._name         = str(__name)
        self._objname      = str(__objname)
        self._lineSerie    = []
        self._switchoff    = False
        self._numobservers = 2
        self._content      = __content
        # Template inserted into generated scripts when a referenced
        # variable (e.g. a callable) cannot be serialized.
        self._missing = """raise ValueError("This case requires beforehand to import or define the variable named <%s>. When corrected, remove this command, correct and uncomment the following one.")\n# """
    def _append(self, *args):
        "Transform an individual command into a record (to be overridden)"
        raise NotImplementedError()
    def _extract(self, __content=""):
        "Transform a record into an individual command (to be overridden)"
        raise NotImplementedError()
    def _interpret(self, __content=""):
        "Interpret a command (to be overridden)"
        raise NotImplementedError()
    def _finalize(self):
        "Record the footer (nothing to do by default)"
        pass
    def _addLine(self, line=""):
        "Append one individual record"
        self._lineSerie.append(line)
    def dump(self, __filename=None):
        "Normalized restitution of the commands"
        __text  = "\n".join(self._lineSerie)
        __text += "\n"
        if __filename is not None:
            __file = os.path.abspath(__filename)
            # `with` guarantees the handle is closed even on write errors
            # (the previous bare open() could leak the file handle).
            with open(__file, "w") as fid:
                fid.write(__text)
        return __text
    def load(self, __filename=None):
        "Normalized loading of the commands"
        if os.path.exists(__filename):
            with open(__filename, 'r') as fid:
                self._content = fid.read()
            __commands = self._extract(self._content)
            return __commands
    def execCase(self, __filename=None):
        "Normalized execution of the commands"
        if os.path.exists(__filename):
            with open(__filename, 'r') as fid:
                self._content = fid.read()
            __retcode = self._interpret(self._content)
            return __retcode
class _TUIViewer(GenericCaseViewer):
    """
    Build the commands for creating a TUI (Python scripting) case.
    """
    def __init__(self, __name="", __objname="case", __content=None):
        "Initialisation and header registration"
        GenericCaseViewer.__init__(self, __name, __objname, __content)
        self._addLine("#\n# Python script for ADAO TUI\n#")
        self._addLine("from numpy import array, matrix")
        self._addLine("import adaoBuilder")
        self._addLine("%s = adaoBuilder.New('%s')"%(self._objname, self._name))
        if self._content is not None:
            for command in self._content:
                self._append(*command)
    def _append(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Transform an individual command into a record"
        if __command is not None and __keys is not None and __local is not None:
            __text  = ""
            if __pre is not None:
                __text += "%s = "%__pre
            __text += "%s.%s( "%(self._objname,str(__command))
            if "self" in __keys: __keys.remove("self")
            if __command not in ("set","get") and "Concept" in __keys: __keys.remove("Concept")
            for k in __keys:
                __v = __local[k]
                # Skip arguments carrying their default/neutral value so the
                # generated script stays minimal.
                if __v is None: continue
                if k == "Checked" and not __v: continue
                if k == "Stored" and not __v: continue
                if k == "AvoidRC" and __v: continue
                if k == "noDetails": continue
                if isinstance(__v,Persistence.Persistence): __v = __v.values()
                # Callables cannot be serialized: prepend a guard that asks
                # the user to define the symbol before running the script.
                if callable(__v): __text = self._missing%__v.__name__+__text
                if isinstance(__v,dict):
                    for val in __v.values():
                        if callable(val): __text = self._missing%val.__name__+__text
                # Temporarily widen numpy printing so repr() of arrays is
                # complete and round-trippable, then restore the defaults.
                numpy.set_printoptions(precision=15,threshold=1000000,linewidth=1000*15)
                __text += "%s=%s, "%(k,repr(__v))
                numpy.set_printoptions(precision=8,threshold=1000,linewidth=75)
            # Rebind the rstrip result: str.rstrip returns a new string
            # (the historical bare `__text.rstrip(", ")` was a no-op).
            __text = __text.rstrip(", ")
            __text += " )"
            self._addLine(__text)
    def _extract(self, __content=""):
        "Transform a record into an individual command"
        __is_case  = False
        __commands = []
        __content = __content.replace("\r\n","\n")
        for line in __content.split("\n"):
            if "adaoBuilder" in line and "=" in line:
                self._objname = line.split("=")[0].strip()
                __is_case = True
            if not __is_case:
                continue
            else:
                if self._objname+".set" in line:
                    __commands.append( line.replace(self._objname+".","",1) )
        return __commands
    def _interpret(self, __content=""):
        "Interpret a command by executing it"
        __content = __content.replace("\r\n","\n")
        # SECURITY NOTE: exec() of the case file content — only run
        # trusted case scripts.
        exec(__content)
        return 0
1885 # ==============================================================================
class ImportFromScript(object):
    """
    Obtain a named variable from an imported script file.
    """
    def __init__(self, __filename=None):
        "Check the file existence and import the script"
        # Validate before any use: the previous code called .rstrip on the
        # argument first, so a None filename raised AttributeError instead
        # of this explicit ValueError.
        if __filename is None:
            raise ValueError("The name of the file, containing the variable to be read, has to be specified.")
        # str.rstrip(".py") strips any trailing '.', 'p' or 'y' characters
        # (e.g. "happy.py" -> "happ"); remove the extension only when it is
        # really present.
        if __filename.endswith(".py"):
            self.__filename = __filename[:-3]
        else:
            self.__filename = __filename
        if not os.path.isfile(str(self.__filename)+".py"):
            raise ValueError("The file containing the variable to be imported doesn't seem to exist. Please check the file. The given file name is:\n \"%s\""%self.__filename)
        self.__scriptfile = __import__(self.__filename, globals(), locals(), [])
        # Keep the raw text too, and close the handle deterministically.
        with open(self.__filename+".py",'r') as fid:
            self.__scriptstring = fid.read()
    def getvalue(self, __varname=None, __synonym=None ):
        "Return the requested variable, trying the synonym as a fallback"
        if __varname is None:
            raise ValueError("The name of the variable to be read has to be specified. Please check the content of the file and the syntax.")
        if not hasattr(self.__scriptfile, __varname):
            if __synonym is None:
                raise ValueError("The imported script file \"%s\" doesn't contain the mandatory variable \"%s\" to be read. Please check the content of the file and the syntax."%(str(self.__filename)+".py",__varname))
            elif not hasattr(self.__scriptfile, __synonym):
                raise ValueError("The imported script file \"%s\" doesn't contain the mandatory variable \"%s\" to be read. Please check the content of the file and the syntax."%(str(self.__filename)+".py",__synonym))
            else:
                return getattr(self.__scriptfile, __synonym)
        else:
            return getattr(self.__scriptfile, __varname)
    def getstring(self):
        "Return the complete script text"
        return self.__scriptstring
1916 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,    # To simulate Hm(x): HO["Direct"].appliedTo
                   _HmX = None,    # Already available simulation of Hm(x)
                   _arg = None,    # Extra arguments for Hm, as a tuple
                   _BI  = None,    # Inverse of the background error covariance
                   _RI  = None,    # Inverse of the observation error covariance
                   _Xb  = None,    # Background state
                   _Y   = None,    # Observations
                   _SIV = False,   # To be removed for 8.0
                   _SSC = None,    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,       # nbPreviousSteps
                   _QM  = "DA",    # QualityMeasure
                   _SSV = None,    # Input and/or output: self.StoredVariables
                   _fRt = False,   # Whether to return the extended output
                   _sSc = True,    # Whether to store the SSC
                  ):
    """
    General cost function useful for the static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.

    Returns the scalar cost J (or _HX for "QR", or the full storage dict
    when _fRt is True). When _sSc is True, intermediate quantities are
    accumulated into _SSV.
    """
    # Fresh containers per call: the previous mutable defaults ([] / {})
    # were shared between calls and silently accumulated results.
    if _SSC is None: _SSC = []
    if _SSV is None: _SSV = {}
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # To use "append" instead of "store"
    #
    _X = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Use the pre-computed simulation when provided, otherwise call Hm.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    # A NaN in the simulation invalidates the whole cost evaluation.
    if numpy.any(numpy.isnan(_HX)):
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb = 0.
            Jo = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb = 0.
            Jo = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb = 0.
            Jo = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb = 0.
            Jo = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
2031 # ==============================================================================
if __name__ == "__main__":
    # Minimal self-test entry point: just print the diagnostic banner.
    print('\n AUTODIAGNOSTIC \n')