1 # -*- coding: utf-8 -*-
3 # Copyright (C) 2008-2018 EDF R&D
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License.
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with this library; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 # See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
21 # Author: Jean-Philippe Argaud, jean-philippe.argaud@edf.fr, EDF R&D
24 Définit les outils généraux élémentaires.
26 __author__ = "Jean-Philippe ARGAUD"
34 from functools import partial
35 from daCore import Persistence
36 from daCore import PlatformInfo
37 from daCore import Interfaces
38 from daCore import Templates
39 from daCore.Interfaces import ImportFromScript, ImportFromFile
41 # ==============================================================================
# NOTE(review): this source is a numbered listing excerpt -- each line still
# carries its original file line number, and the gaps in that numbering show
# that many lines are missing, so the code below is not runnable as shown.
# Only comments are added; all visible tokens are left untouched.
42 class CacheManager(object):
# Purpose (from the visible lines): keep a bounded list of previously
# computed (flattened point, result, point norm) triples so that
# re-evaluating an operator at an already-seen point can be avoided.
44 Classe générale de gestion d'un cache de calculs
# Constructor fragment: duplicate-detection tolerance and maximum cache
# length; -1 presumably means "size the cache later from the vector size"
# (see line 81 below) -- TODO confirm against the full file.
47 toleranceInRedundancy = 1.e-18,
48 lenghtOfRedundancy = -1,
51 Les caractéristiques de tolérance peuvent être modifées à la création.
53 self.__tolerBP = float(toleranceInRedundancy)
54 self.__lenghtOR = int(lenghtOfRedundancy)
55 self.__initlnOR = self.__lenghtOR
# Cache storage: list of (point, result, norm) tuples, per the inline note.
60 self.__listOPCV = [] # Operator Previous Calculated Points, Results, Point Norms
61 # logging.debug("CM Tolerance de determination des doublons : %.2e", self.__tolerBP)
# Cache lookup: scan stored points backwards (most recent first) and report
# a hit when the distance to a stored point is below tolerance * stored
# norm.  The lines that initialise and return the hit flag / cached value
# are missing from this excerpt.
63 def wasCalculatedIn(self, xValue ): #, info="" ):
64 "Vérifie l'existence d'un calcul correspondant à la valeur"
67 for i in range(min(len(self.__listOPCV),self.__lenghtOR)-1,-1,-1):
# A size mismatch means it cannot be the same point; the skip/continue
# statement itself is on a missing line.
68 if not hasattr(xValue, 'size') or (xValue.size != self.__listOPCV[i][0].size):
69 # logging.debug("CM Différence de la taille %s de X et de celle %s du point %i déjà calculé", xValue.shape,i,self.__listOPCP[i].shape)
71 if numpy.linalg.norm(numpy.ravel(xValue) - self.__listOPCV[i][0]) < self.__tolerBP * self.__listOPCV[i][2]:
73 __HxV = self.__listOPCV[i][1]
74 # logging.debug("CM Cas%s déja calculé, portant le numéro %i", info, i)
# Store a (point, result) pair: size the cache from the point dimension on
# first use, then evict the oldest entries while over the length bound.
78 def storeValueInX(self, xValue, HxValue ):
79 "Stocke un calcul correspondant à la valeur"
80 if self.__lenghtOR < 0:
81 self.__lenghtOR = 2 * xValue.size + 2
82 self.__initlnOR = self.__lenghtOR
83 while len(self.__listOPCV) > self.__lenghtOR:
84 # logging.debug("CM Réduction de la liste des cas à %i éléments par suppression du premier", self.__lenghtOR)
85 self.__listOPCV.pop(0)
86 self.__listOPCV.append( (
87 copy.copy(numpy.ravel(xValue)),
89 numpy.linalg.norm(xValue),
# The two fragments below presumably belong to methods that save and
# restore the cache length (enable/disable of caching) -- the method
# headers are on missing lines; TODO confirm against the full file.
94 self.__initlnOR = self.__lenghtOR
99 self.__lenghtOR = self.__initlnOR
101 # ==============================================================================
# NOTE(review): numbered listing excerpt with missing lines -- comments only
# are added below; all visible tokens are left untouched.
102 class Operator(object):
# Purpose (from the visible lines): wrap either a matrix or a Python
# (multi-)function as a generic operator, counting calls per instance and
# per class, and optionally routing evaluations through a shared cache
# `Operator.CM` (its definition is not visible in this excerpt -- presumably
# a class-level CacheManager; TODO confirm).
104 Classe générale d'interface de type opérateur simple
# Constructor: exactly one of fromMethod / fromMatrix is expected.  When the
# function is not already a multi-function, it is adapted through
# `MultiFonction` (defined elsewhere in the file, not visible here).
111 def __init__(self, fromMethod=None, fromMatrix=None, avoidingRedundancy = True, inputAsMultiFunction = False):
113 On construit un objet de ce type en fournissant, à l'aide de l'un des
114 deux mots-clé, soit une fonction ou un multi-fonction python, soit une
117 - fromMethod : argument de type fonction Python
118 - fromMatrix : argument adapté au constructeur numpy.matrix
119 - avoidingRedundancy : évite ou pas les calculs redondants
120 - inputAsMultiFunction : fonction explicitement définie ou pas en multi-fonction
122 self.__NbCallsAsMatrix, self.__NbCallsAsMethod, self.__NbCallsOfCached = 0, 0, 0
123 self.__AvoidRC = bool( avoidingRedundancy )
124 self.__inputAsMF = bool( inputAsMultiFunction )
125 if fromMethod is not None and self.__inputAsMF:
126 self.__Method = fromMethod # logtimer(fromMethod)
128 self.__Type = "Method"
129 elif fromMethod is not None and not self.__inputAsMF:
130 self.__Method = partial( MultiFonction, _sFunction=fromMethod)
132 self.__Type = "Method"
133 elif fromMatrix is not None:
# NOTE(review): numpy.float is deprecated (removed in NumPy >= 1.24);
# modern code would use float or numpy.float64.  Left as-is here.
135 self.__Matrix = numpy.matrix( fromMatrix, numpy.float )
136 self.__Type = "Matrix"
# Cache enable/disable helpers operating on the shared class-level cache.
142 def disableAvoidingRedundancy(self):
144 Operator.CM.disable()
# NOTE(review): the visible disable() call here is presumably only the
# fallback branch of enableAvoidingRedundancy (the enabling branch being on
# missing lines) -- TODO confirm against the full file.
146 def enableAvoidingRedundancy(self):
151 Operator.CM.disable()
# Apply the operator to one value or a series of values.  HValue allows the
# caller to supply precomputed results (stored into the cache); otherwise
# each point is looked up in the cache, computed via the matrix, or batched
# into a series evaluated by the multi-function.
157 def appliedTo(self, xValue, HValue = None, argsAsSerie = False):
159 Permet de restituer le résultat de l'application de l'opérateur à un
160 argument xValue. Cette méthode se contente d'appliquer, son argument
161 devant a priori être du bon type.
163 - xValue : argument adapté pour appliquer l'opérateur
170 if HValue is not None:
174 PlatformInfo.isIterable( _xValue, True, " in Operator.appliedTo" )
176 if _HValue is not None:
177 assert len(_xValue) == len(_HValue), "Incompatible number of elements in xValue and HValue"
179 for i in range(len(_HValue)):
180 HxValue.append( numpy.asmatrix( numpy.ravel( _HValue[i] ) ).T )
182 Operator.CM.storeValueInX(_xValue[i],HxValue[-1])
187 for i, xv in enumerate(_xValue):
189 __alreadyCalculated, __HxV = Operator.CM.wasCalculatedIn(xv)
191 __alreadyCalculated = False
193 if __alreadyCalculated:
194 self.__addOneCacheCall()
197 if self.__Matrix is not None:
198 self.__addOneMatrixCall()
199 _hv = self.__Matrix * xv
201 self.__addOneMethodCall()
205 HxValue.append( _hv )
# Batched multi-function evaluation for all points not served by the
# matrix or the cache; a non-sequence result means the user supplied a
# mono-function where a multi-function was expected.
207 if len(_xserie)>0 and self.__Matrix is None:
208 _hserie = self.__Method( _xserie ) # Calcul MF
209 if not hasattr(_hserie, "pop"):
210 raise TypeError("The user input multi-function doesn't seem to return sequence results, behaving like a mono-function. It has to be checked.")
216 Operator.CM.storeValueInX(_xv,_hv)
218 if argsAsSerie: return HxValue
219 else: return HxValue[-1]
# Apply the operator to (xValue, uValue) control pairs; u == None means the
# operator applies to x alone.
221 def appliedControledFormTo(self, paires, argsAsSerie = False ):
223 Permet de restituer le résultat de l'application de l'opérateur à une
224 paire (xValue, uValue). Cette méthode se contente d'appliquer, son
225 argument devant a priori être du bon type. Si la uValue est None,
226 on suppose que l'opérateur ne s'applique qu'à xValue.
228 - xValue : argument X adapté pour appliquer l'opérateur
229 - uValue : argument U adapté pour appliquer l'opérateur
231 if argsAsSerie: _xuValue = paires
232 else: _xuValue = (paires,)
233 PlatformInfo.isIterable( _xuValue, True, " in Operator.appliedControledFormTo" )
235 if self.__Matrix is not None:
237 for paire in _xuValue:
238 _xValue, _uValue = paire
239 self.__addOneMatrixCall()
240 HxValue.append( self.__Matrix * _xValue )
243 for paire in _xuValue:
245 _xValue, _uValue = paire
246 if _uValue is not None:
# NOTE(review): as shown this appends to the sequence being iterated; a
# separate accumulator list was most likely defined on a missing line
# (around original line 244) -- TODO check the full file.
247 _xuValue.append( paire )
249 _xuValue.append( _xValue )
250 self.__addOneMethodCall( len(_xuValue) )
251 HxValue = self.__Method( _xuValue ) # Calcul MF
253 if argsAsSerie: return HxValue
254 else: return HxValue[-1]
# Apply the operator linearized at xNominal to xValue; for a matrix the
# nominal point is irrelevant (linear operator).
256 def appliedInXTo(self, paires, argsAsSerie = False ):
258 Permet de restituer le résultat de l'application de l'opérateur à un
259 argument xValue, sachant que l'opérateur est valable en xNominal.
260 Cette méthode se contente d'appliquer, son argument devant a priori
261 être du bon type. Si l'opérateur est linéaire car c'est une matrice,
262 alors il est valable en tout point nominal et il n'est pas nécessaire
264 Arguments : une liste contenant
265 - xNominal : argument permettant de donner le point où l'opérateur
266 est construit pour etre ensuite appliqué
267 - xValue : argument adapté pour appliquer l'opérateur
269 if argsAsSerie: _nxValue = paires
270 else: _nxValue = (paires,)
271 PlatformInfo.isIterable( _nxValue, True, " in Operator.appliedInXTo" )
273 if self.__Matrix is not None:
275 for paire in _nxValue:
276 _xNominal, _xValue = paire
277 self.__addOneMatrixCall()
278 HxValue.append( self.__Matrix * _xValue )
280 self.__addOneMethodCall( len(_nxValue) )
281 HxValue = self.__Method( _nxValue ) # Calcul MF
283 if argsAsSerie: return HxValue
284 else: return HxValue[-1]
# Return the operator as a matrix; a functional operator needs an operating
# point (ValueForMethodForm) to be evaluated into matrix form.
286 def asMatrix(self, ValueForMethodForm = "UnknownVoidValue", argsAsSerie = False):
288 Permet de renvoyer l'opérateur sous la forme d'une matrice
290 if self.__Matrix is not None:
291 self.__addOneMatrixCall()
292 mValue = [self.__Matrix,]
# NOTE(review): `is not` compares identity with a string literal (a
# SyntaxWarning on modern CPython and fragile across interning); `!=` is
# the intended comparison.  Left unchanged in this documentation pass.
293 elif ValueForMethodForm is not "UnknownVoidValue": # Ne pas utiliser "None"
296 self.__addOneMethodCall( len(ValueForMethodForm) )
297 for _vfmf in ValueForMethodForm:
298 mValue.append( numpy.matrix( self.__Method(((_vfmf, None),)) ) )
300 self.__addOneMethodCall()
301 mValue = self.__Method(((ValueForMethodForm, None),))
303 raise ValueError("Matrix form of the operator defined as a function/method requires to give an operating point.")
305 if argsAsSerie: return mValue
306 else: return mValue[-1]
# Shape is only available for the matrix form.
310 Renvoie la taille sous forme numpy si l'opérateur est disponible sous
311 la forme d'une matrice
313 if self.__Matrix is not None:
314 return self.__Matrix.shape
316 raise ValueError("Matrix form of the operator is not available, nor the shape")
# Evaluation counters: per-instance totals followed by class-wide totals;
# `which` selects a single entry of the tuple.
318 def nbcalls(self, which=None):
320 Renvoie les nombres d'évaluations de l'opérateur
323 self.__NbCallsAsMatrix+self.__NbCallsAsMethod,
324 self.__NbCallsAsMatrix,
325 self.__NbCallsAsMethod,
326 self.__NbCallsOfCached,
327 Operator.NbCallsAsMatrix+Operator.NbCallsAsMethod,
328 Operator.NbCallsAsMatrix,
329 Operator.NbCallsAsMethod,
330 Operator.NbCallsOfCached,
332 if which is None: return __nbcalls
333 else: return __nbcalls[which]
# Counter helpers: increment both the local (instance) and global (class)
# tallies for matrix, method and cache usage respectively.
335 def __addOneMatrixCall(self):
336 "Comptabilise un appel"
337 self.__NbCallsAsMatrix += 1 # Decompte local
338 Operator.NbCallsAsMatrix += 1 # Decompte global
340 def __addOneMethodCall(self, nb = 1):
341 "Comptabilise un appel"
342 self.__NbCallsAsMethod += nb # Decompte local
343 Operator.NbCallsAsMethod += nb # Decompte global
345 def __addOneCacheCall(self):
346 "Comptabilise un appel"
347 self.__NbCallsOfCached += 1 # Decompte local
348 Operator.NbCallsOfCached += 1 # Decompte global
350 # ==============================================================================
# NOTE(review): numbered listing excerpt with missing lines -- comments only
# are added below; all visible tokens are left untouched.
351 class FullOperator(object):
# Purpose (from the visible lines): build a Direct/Tangent/Adjoint triple of
# Operator objects from one of three user inputs -- a single function (with
# approximated derivatives), three explicit functions, or a matrix -- plus a
# parameter dictionary and optional "applied in X" values.
353 Classe générale d'interface de type opérateur complet
354 (Direct, Linéaire Tangent, Adjoint)
# Constructor signature fragment; the `def __init__` line and several
# keyword parameters (asMatrix, appliedInX, avoidRC, scheduledBy,
# toBeChecked, ...) are on missing lines -- TODO confirm against the full
# file.
357 name = "GenericFullOperator",
359 asOneFunction = None, # 1 Fonction
360 asThreeFunctions = None, # 3 Fonctions in a dictionary
361 asScript = None, # 1 or 3 Fonction(s) by script
362 asDict = None, # Parameters
365 inputAsMF = False,# Fonction(s) as Multi-Functions
370 self.__name = str(name)
371 self.__check = bool(toBeChecked)
# Translate user-facing parameter names into the internal "with..." keys
# consumed by the finite-difference machinery below.
376 if (asDict is not None) and isinstance(asDict, dict):
377 __Parameters.update( asDict )
378 if "DifferentialIncrement" in asDict:
379 __Parameters["withIncrement"] = asDict["DifferentialIncrement"]
380 if "CenteredFiniteDifference" in asDict:
381 __Parameters["withCenteredDF"] = asDict["CenteredFiniteDifference"]
382 if "EnableMultiProcessing" in asDict:
383 __Parameters["withmpEnabled"] = asDict["EnableMultiProcessing"]
384 if "NumberOfProcesses" in asDict:
385 __Parameters["withmpWorkers"] = asDict["NumberOfProcesses"]
# Script input: import the matrix or the named operator function(s) from a
# user script file.  The branch conditions between these fragments are on
# missing lines.
387 if asScript is not None:
388 __Matrix, __Function = None, None
390 __Matrix = ImportFromScript(asScript).getvalue( self.__name )
392 __Function = { "Direct":ImportFromScript(asScript).getvalue( "DirectOperator" ) }
393 __Function.update({"useApproximatedDerivatives":True})
394 __Function.update(__Parameters)
395 elif asThreeFunctions:
397 "Direct" :ImportFromScript(asScript).getvalue( "DirectOperator" ),
398 "Tangent":ImportFromScript(asScript).getvalue( "TangentOperator" ),
399 "Adjoint":ImportFromScript(asScript).getvalue( "AdjointOperator" ),
401 __Function.update(__Parameters)
# Direct (non-script) input: one function, three functions, or a matrix.
404 if asOneFunction is not None:
405 if isinstance(asOneFunction, dict) and "Direct" in asOneFunction:
406 if asOneFunction["Direct"] is not None:
407 __Function = asOneFunction
409 raise ValueError("The function has to be given in a dictionnary which have 1 key (\"Direct\")")
411 __Function = { "Direct":asOneFunction }
412 __Function.update({"useApproximatedDerivatives":True})
413 __Function.update(__Parameters)
414 elif asThreeFunctions is not None:
415 if isinstance(asThreeFunctions, dict) and \
416 ("Tangent" in asThreeFunctions) and (asThreeFunctions["Tangent"] is not None) and \
417 ("Adjoint" in asThreeFunctions) and (asThreeFunctions["Adjoint"] is not None) and \
418 (("useApproximatedDerivatives" not in asThreeFunctions) or not bool(asThreeFunctions["useApproximatedDerivatives"])):
419 __Function = asThreeFunctions
420 elif isinstance(asThreeFunctions, dict) and \
421 ("Direct" in asThreeFunctions) and (asThreeFunctions["Direct"] is not None):
422 __Function = asThreeFunctions
423 __Function.update({"useApproximatedDerivatives":True})
425 raise ValueError("The functions has to be given in a dictionnary which have either 1 key (\"Direct\") or 3 keys (\"Direct\" (optionnal), \"Tangent\" and \"Adjoint\")")
# Fallback: without a Direct function, reuse the Tangent as Direct.
426 if "Direct" not in asThreeFunctions:
427 __Function["Direct"] = asThreeFunctions["Tangent"]
428 __Function.update(__Parameters)
# Historical Python 2 guard kept commented out in the original.
432 # if sys.version_info[0] < 3 and isinstance(__Function, dict):
433 # for k in ("Direct", "Tangent", "Adjoint"):
434 # if k in __Function and hasattr(__Function[k],"__class__"):
435 # if type(__Function[k]) is type(self.__init__):
436 # raise TypeError("can't use a class method (%s) as a function for the \"%s\" operator. Use a real function instead."%(type(__Function[k]),k))
# Normalise the "applied in X" input into a dict keyed by "HXb".
438 if appliedInX is not None and isinstance(appliedInX, dict):
439 __appliedInX = appliedInX
440 elif appliedInX is not None:
441 __appliedInX = {"HXb":appliedInX}
445 if scheduledBy is not None:
446 self.__T = scheduledBy
# Case 1: single Direct function with approximated derivatives -- fill in
# the finite-difference defaults and build the triple via FDApproximation.
448 if isinstance(__Function, dict) and \
449 ("useApproximatedDerivatives" in __Function) and bool(__Function["useApproximatedDerivatives"]) and \
450 ("Direct" in __Function) and (__Function["Direct"] is not None):
451 if "withCenteredDF" not in __Function: __Function["withCenteredDF"] = False
452 if "withIncrement" not in __Function: __Function["withIncrement"] = 0.01
453 if "withdX" not in __Function: __Function["withdX"] = None
454 if "withAvoidingRedundancy" not in __Function: __Function["withAvoidingRedundancy"] = True
455 if "withToleranceInRedundancy" not in __Function: __Function["withToleranceInRedundancy"] = 1.e-18
456 if "withLenghtOfRedundancy" not in __Function: __Function["withLenghtOfRedundancy"] = -1
457 if "withmpEnabled" not in __Function: __Function["withmpEnabled"] = False
458 if "withmpWorkers" not in __Function: __Function["withmpWorkers"] = None
459 if "withmfEnabled" not in __Function: __Function["withmfEnabled"] = inputAsMF
460 from daNumerics.ApproximatedDerivatives import FDApproximation
461 FDA = FDApproximation(
462 Function = __Function["Direct"],
463 centeredDF = __Function["withCenteredDF"],
464 increment = __Function["withIncrement"],
465 dX = __Function["withdX"],
466 avoidingRedundancy = __Function["withAvoidingRedundancy"],
467 toleranceInRedundancy = __Function["withToleranceInRedundancy"],
468 lenghtOfRedundancy = __Function["withLenghtOfRedundancy"],
469 mpEnabled = __Function["withmpEnabled"],
470 mpWorkers = __Function["withmpWorkers"],
471 mfEnabled = __Function["withmfEnabled"],
473 self.__FO["Direct"] = Operator( fromMethod = FDA.DirectOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF)
474 self.__FO["Tangent"] = Operator( fromMethod = FDA.TangentOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
475 self.__FO["Adjoint"] = Operator( fromMethod = FDA.AdjointOperator, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
# Case 2: the three operators are all given explicitly as functions.
476 elif isinstance(__Function, dict) and \
477 ("Direct" in __Function) and ("Tangent" in __Function) and ("Adjoint" in __Function) and \
478 (__Function["Direct"] is not None) and (__Function["Tangent"] is not None) and (__Function["Adjoint"] is not None):
479 self.__FO["Direct"] = Operator( fromMethod = __Function["Direct"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
480 self.__FO["Tangent"] = Operator( fromMethod = __Function["Tangent"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
481 self.__FO["Adjoint"] = Operator( fromMethod = __Function["Adjoint"], avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
# Case 3: a matrix -- the adjoint is its transpose.  NOTE(review): the
# condition tests `asMatrix` but the conversion uses `__Matrix`; the
# assignment linking the two is presumably on a missing line.  numpy.float
# is deprecated in modern NumPy (see Operator.__init__ note).
482 elif asMatrix is not None:
483 __matrice = numpy.matrix( __Matrix, numpy.float )
484 self.__FO["Direct"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
485 self.__FO["Tangent"] = Operator( fromMatrix = __matrice, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
486 self.__FO["Adjoint"] = Operator( fromMatrix = __matrice.T, avoidingRedundancy = avoidRC, inputAsMultiFunction = inputAsMF )
489 raise ValueError("Improperly defined observation operator, it requires at minima either a matrix, a Direct for approximate derivatives or a Tangent/Adjoint pair.")
# Normalise each "applied in X" entry into a column numpy.matrix,
# whatever representation (matrix, 2-d array, flat sequence) was supplied.
491 if __appliedInX is not None:
492 self.__FO["AppliedInX"] = {}
493 for key in list(__appliedInX.keys()):
494 if type( __appliedInX[key] ) is type( numpy.matrix([]) ):
495 # Pour le cas où l'on a une vraie matrice
496 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].A1, numpy.float ).T
497 elif type( __appliedInX[key] ) is type( numpy.array([]) ) and len(__appliedInX[key].shape) > 1:
498 # Pour le cas où l'on a un vecteur représenté en array avec 2 dimensions
499 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key].reshape(len(__appliedInX[key]),), numpy.float ).T
501 self.__FO["AppliedInX"][key] = numpy.matrix( __appliedInX[key], numpy.float ).T
503 self.__FO["AppliedInX"] = None
# Dunder fragments; the `def __repr__` / `def __str__` headers are on
# missing lines.
509 "x.__repr__() <==> repr(x)"
510 return repr(self.__V)
513 "x.__str__() <==> str(x)"
516 # ==============================================================================
# NOTE(review): numbered listing excerpt with missing lines -- comments only
# are added below; all visible tokens are left untouched.
517 class Algorithm(object):
# Purpose (from the visible lines): base class for elementary assimilation
# algorithms.  Provides a dictionary of Persistence storage containers
# (StoredVariables), declared-parameter validation, and the common
# _pre_run/_post_run bookkeeping.  Subclasses must implement run().
519 Classe générale d'interface de type algorithme
521 Elle donne un cadre pour l'écriture d'une classe élémentaire d'algorithme
522 d'assimilation, en fournissant un container (dictionnaire) de variables
523 persistantes initialisées, et des méthodes d'accès à ces variables stockées.
525 Une classe élémentaire d'algorithme doit implémenter la méthode "run".
527 def __init__(self, name):
529 L'initialisation présente permet de fabriquer des variables de stockage
530 disponibles de manière générique dans les algorithmes élémentaires. Ces
531 variables de stockage sont ensuite conservées dans un dictionnaire
532 interne à l'objet, mais auquel on accède par la méthode "get".
534 Les variables prévues sont :
535 - CostFunctionJ : fonction-cout globale, somme des deux parties suivantes
536 - CostFunctionJb : partie ébauche ou background de la fonction-cout
537 - CostFunctionJo : partie observations de la fonction-cout
538 - GradientOfCostFunctionJ : gradient de la fonction-cout globale
539 - GradientOfCostFunctionJb : gradient de la partie ébauche de la fonction-cout
540 - GradientOfCostFunctionJo : gradient de la partie observations de la fonction-cout
541 - CurrentState : état courant lors d'itérations
542 - Analysis : l'analyse Xa
543 - SimulatedObservationAtBackground : l'état observé H(Xb) à l'ébauche
544 - SimulatedObservationAtCurrentState : l'état observé H(X) à l'état courant
545 - SimulatedObservationAtOptimum : l'état observé H(Xa) à l'optimum
546 - Innovation : l'innovation : d = Y - H(X)
547 - InnovationAtCurrentState : l'innovation à l'état courant : dn = Y - H(Xn)
548 - SigmaObs2 : indicateur de correction optimale des erreurs d'observation
549 - SigmaBck2 : indicateur de correction optimale des erreurs d'ébauche
550 - MahalanobisConsistency : indicateur de consistance des covariances
551 - OMA : Observation moins Analysis : Y - Xa
552 - OMB : Observation moins Background : Y - Xb
553 - AMB : Analysis moins Background : Xa - Xb
554 - APosterioriCovariance : matrice A
555 - APosterioriVariances : variances de la matrice A
556 - APosterioriStandardDeviations : écart-types de la matrice A
557 - APosterioriCorrelations : correlations de la matrice A
558 - Residu : dans le cas des algorithmes de vérification
559 On peut rajouter des variables à stocker dans l'initialisation de
560 l'algorithme élémentaire qui va hériter de cette classe
562 logging.debug("%s Initialisation", str(name))
563 self._m = PlatformInfo.SystemUsage()
565 self._name = str( name )
566 self._parameters = {"StoreSupplementaryCalculations":[]}
567 self.__required_parameters = {}
568 self.__required_inputs = {"RequiredInputValues":{"mandatory":(), "optional":()}}
# One Persistence container per storable variable; scalars, vectors and
# matrices use the matching Persistence class.
570 self.StoredVariables = {}
571 self.StoredVariables["CostFunctionJ"] = Persistence.OneScalar(name = "CostFunctionJ")
572 self.StoredVariables["CostFunctionJb"] = Persistence.OneScalar(name = "CostFunctionJb")
573 self.StoredVariables["CostFunctionJo"] = Persistence.OneScalar(name = "CostFunctionJo")
574 self.StoredVariables["CostFunctionJAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJAtCurrentOptimum")
575 self.StoredVariables["CostFunctionJbAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJbAtCurrentOptimum")
576 self.StoredVariables["CostFunctionJoAtCurrentOptimum"] = Persistence.OneScalar(name = "CostFunctionJoAtCurrentOptimum")
577 self.StoredVariables["GradientOfCostFunctionJ"] = Persistence.OneVector(name = "GradientOfCostFunctionJ")
578 self.StoredVariables["GradientOfCostFunctionJb"] = Persistence.OneVector(name = "GradientOfCostFunctionJb")
579 self.StoredVariables["GradientOfCostFunctionJo"] = Persistence.OneVector(name = "GradientOfCostFunctionJo")
580 self.StoredVariables["CurrentState"] = Persistence.OneVector(name = "CurrentState")
581 self.StoredVariables["PredictedState"] = Persistence.OneVector(name = "PredictedState")
582 self.StoredVariables["Analysis"] = Persistence.OneVector(name = "Analysis")
583 self.StoredVariables["IndexOfOptimum"] = Persistence.OneIndex(name = "IndexOfOptimum")
584 self.StoredVariables["CurrentOptimum"] = Persistence.OneVector(name = "CurrentOptimum")
585 self.StoredVariables["SimulatedObservationAtBackground"] = Persistence.OneVector(name = "SimulatedObservationAtBackground")
586 self.StoredVariables["SimulatedObservationAtCurrentState"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentState")
587 self.StoredVariables["SimulatedObservationAtOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtOptimum")
588 self.StoredVariables["SimulatedObservationAtCurrentOptimum"] = Persistence.OneVector(name = "SimulatedObservationAtCurrentOptimum")
589 self.StoredVariables["Innovation"] = Persistence.OneVector(name = "Innovation")
590 self.StoredVariables["InnovationAtCurrentState"] = Persistence.OneVector(name = "InnovationAtCurrentState")
591 self.StoredVariables["SigmaObs2"] = Persistence.OneScalar(name = "SigmaObs2")
592 self.StoredVariables["SigmaBck2"] = Persistence.OneScalar(name = "SigmaBck2")
593 self.StoredVariables["MahalanobisConsistency"] = Persistence.OneScalar(name = "MahalanobisConsistency")
594 self.StoredVariables["OMA"] = Persistence.OneVector(name = "OMA")
595 self.StoredVariables["OMB"] = Persistence.OneVector(name = "OMB")
596 self.StoredVariables["BMA"] = Persistence.OneVector(name = "BMA")
597 self.StoredVariables["APosterioriCovariance"] = Persistence.OneMatrix(name = "APosterioriCovariance")
598 self.StoredVariables["APosterioriVariances"] = Persistence.OneVector(name = "APosterioriVariances")
599 self.StoredVariables["APosterioriStandardDeviations"] = Persistence.OneVector(name = "APosterioriStandardDeviations")
600 self.StoredVariables["APosterioriCorrelations"] = Persistence.OneMatrix(name = "APosterioriCorrelations")
601 self.StoredVariables["SimulationQuantiles"] = Persistence.OneMatrix(name = "SimulationQuantiles")
602 self.StoredVariables["Residu"] = Persistence.OneScalar(name = "Residu")
# Common pre-run: store the incoming Parameters, check that mandatory
# inputs (vectors Xb/Y, covariances R/B/Q) were supplied, and configure
# bounds / optimizer verbosity according to the logging level.
604 def _pre_run(self, Parameters, Xb=None, Y=None, R=None, B=None, Q=None ):
606 logging.debug("%s Lancement", self._name)
607 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
609 # Mise a jour de self._parameters avec Parameters
610 self.__setParameters(Parameters)
612 # Corrections et complements
# Inner helper: a missing mandatory vector raises, a missing optional one
# is only logged.
613 def __test_vvalue(argument, variable, argname):
615 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
616 raise ValueError("%s %s vector %s has to be properly defined!"%(self._name,argname,variable))
617 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
618 logging.debug("%s %s vector %s is not set, but is optional."%(self._name,argname,variable))
620 logging.debug("%s %s vector %s is not set, but is not required."%(self._name,argname,variable))
622 logging.debug("%s %s vector %s is set, and its size is %i."%(self._name,argname,variable,numpy.array(argument).size))
624 __test_vvalue( Xb, "Xb", "Background or initial state" )
625 __test_vvalue( Y, "Y", "Observation" )
# Same check for covariance matrices.
627 def __test_cvalue(argument, variable, argname):
629 if variable in self.__required_inputs["RequiredInputValues"]["mandatory"]:
630 raise ValueError("%s %s error covariance matrix %s has to be properly defined!"%(self._name,argname,variable))
631 elif variable in self.__required_inputs["RequiredInputValues"]["optional"]:
632 logging.debug("%s %s error covariance matrix %s is not set, but is optional."%(self._name,argname,variable))
634 logging.debug("%s %s error covariance matrix %s is not set, but is not required."%(self._name,argname,variable))
636 logging.debug("%s %s error covariance matrix %s is set."%(self._name,argname,variable))
638 __test_cvalue( R, "R", "Observation" )
639 __test_cvalue( B, "B", "Background" )
640 __test_cvalue( Q, "Q", "Evolution" )
642 if ("Bounds" in self._parameters) and isinstance(self._parameters["Bounds"], (list, tuple)) and (len(self._parameters["Bounds"]) > 0):
643 logging.debug("%s Prise en compte des bornes effectuee"%(self._name,))
645 self._parameters["Bounds"] = None
# Verbose optimizer output below WARNING level, quiet otherwise; scipy's
# TNC message flags are used when scipy is available.
647 if logging.getLogger().level < logging.WARNING:
648 self._parameters["optiprint"], self._parameters["optdisp"] = 1, 1
649 if PlatformInfo.has_scipy:
650 import scipy.optimize
651 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_ALL
653 self._parameters["optmessages"] = 15
655 self._parameters["optiprint"], self._parameters["optdisp"] = -1, 0
656 if PlatformInfo.has_scipy:
657 import scipy.optimize
658 self._parameters["optmessages"] = scipy.optimize.tnc.MSG_NONE
660 self._parameters["optmessages"] = 15
# Common post-run: derive variances / standard deviations / correlations
# from each stored a posteriori covariance on request, then log operator
# call counts and memory usage.
664 def _post_run(self,_oH=None):
666 if ("StoreSupplementaryCalculations" in self._parameters) and \
667 "APosterioriCovariance" in self._parameters["StoreSupplementaryCalculations"]:
668 for _A in self.StoredVariables["APosterioriCovariance"]:
669 if "APosterioriVariances" in self._parameters["StoreSupplementaryCalculations"]:
670 self.StoredVariables["APosterioriVariances"].store( numpy.diag(_A) )
671 if "APosterioriStandardDeviations" in self._parameters["StoreSupplementaryCalculations"]:
672 self.StoredVariables["APosterioriStandardDeviations"].store( numpy.sqrt(numpy.diag(_A)) )
673 if "APosterioriCorrelations" in self._parameters["StoreSupplementaryCalculations"]:
674 _EI = numpy.diag(1./numpy.sqrt(numpy.diag(_A)))
675 _C = numpy.dot(_EI, numpy.dot(_A, _EI))
676 self.StoredVariables["APosterioriCorrelations"].store( _C )
678 logging.debug("%s Nombre d'évaluation(s) de l'opérateur d'observation direct/tangent/adjoint.: %i/%i/%i", self._name, _oH["Direct"].nbcalls(0),_oH["Tangent"].nbcalls(0),_oH["Adjoint"].nbcalls(0))
679 logging.debug("%s Nombre d'appels au cache d'opérateur d'observation direct/tangent/adjoint..: %i/%i/%i", self._name, _oH["Direct"].nbcalls(3),_oH["Tangent"].nbcalls(3),_oH["Adjoint"].nbcalls(3))
680 logging.debug("%s Taille mémoire utilisée de %.0f Mio", self._name, self._m.getUsedMemory("Mio"))
681 logging.debug("%s Terminé", self._name)
684 def _toStore(self, key):
685 "True if in StoreSupplementaryCalculations, else False"
686 return key in self._parameters["StoreSupplementaryCalculations"]
# Mapping-style access to the stored variables.
688 def get(self, key=None):
690 Renvoie l'une des variables stockées identifiée par la clé, ou le
691 dictionnaire de l'ensemble des variables disponibles en l'absence de
692 clé. Ce sont directement les variables sous forme objet qui sont
693 renvoyées, donc les méthodes d'accès à l'objet individuel sont celles
694 des classes de persistance.
697 return self.StoredVariables[key]
699 return self.StoredVariables
701 def __contains__(self, key=None):
702 "D.__contains__(k) -> True if D has a key k, else False"
703 return key in self.StoredVariables
# keys()/pop() fragments; their `def` headers are on missing lines.
706 "D.keys() -> list of D's keys"
707 if hasattr(self, "StoredVariables"):
708 return self.StoredVariables.keys()
713 "D.pop(k[,d]) -> v, remove specified key and return the corresponding value"
714 if hasattr(self, "StoredVariables"):
715 return self.StoredVariables.pop(k, d)
720 raise TypeError("pop expected at least 1 arguments, got 0")
721 "If key is not found, d is returned if given, otherwise KeyError is raised"
# Abstract entry point to be overridden by each concrete algorithm.
727 def run(self, Xb=None, Y=None, H=None, M=None, R=None, B=None, Q=None, Parameters=None):
729 Doit implémenter l'opération élémentaire de calcul d'assimilation sous
730 sa forme mathématique la plus naturelle possible.
732 raise NotImplementedError("Mathematical assimilation calculation has not been implemented!")
# Declare a parameter with its default value and validation constraints
# (typecast, min/max bounds, allowed-values list).
734 def defineRequiredParameter(self, name = None, default = None, typecast = None, message = None, minval = None, maxval = None, listval = None):
736 Permet de définir dans l'algorithme des paramètres requis et leurs
737 caractéristiques par défaut.
740 raise ValueError("A name is mandatory to define a required parameter.")
742 self.__required_parameters[name] = {
744 "typecast" : typecast,
750 logging.debug("%s %s (valeur par défaut = %s)", self._name, message, self.setParameterValue(name))
752 def getRequiredParameters(self, noDetails=True):
754 Renvoie la liste des noms de paramètres requis ou directement le
755 dictionnaire des paramètres requis.
758 return sorted(self.__required_parameters.keys())
760 return self.__required_parameters
# Validate and return a parameter value: apply the typecast, fall back to
# the default, and enforce minval/maxval/listval constraints.
762 def setParameterValue(self, name=None, value=None):
764 Renvoie la valeur d'un paramètre requis de manière contrôlée
766 default = self.__required_parameters[name]["default"]
767 typecast = self.__required_parameters[name]["typecast"]
768 minval = self.__required_parameters[name]["minval"]
769 maxval = self.__required_parameters[name]["maxval"]
770 listval = self.__required_parameters[name]["listval"]
772 if value is None and default is None:
774 elif value is None and default is not None:
775 if typecast is None: __val = default
776 else: __val = typecast( default )
778 if typecast is None: __val = value
779 else: __val = typecast( value )
781 if minval is not None and (numpy.array(__val, float) < minval).any():
782 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be less than %s."%(name, __val, minval))
783 if maxval is not None and (numpy.array(__val, float) > maxval).any():
784 raise ValueError("The parameter named \"%s\" of value \"%s\" can not be greater than %s."%(name, __val, maxval))
785 if listval is not None:
786 if typecast is list or typecast is tuple or isinstance(__val,list) or isinstance(__val,tuple):
789 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%(v, name, listval))
790 elif __val not in listval:
791 raise ValueError("The value \"%s\" of the parameter named \"%s\" is not allowed, it has to be in the list %s."%( __val, name,listval))
794 def requireInputArguments(self, mandatory=(), optional=()):
796 Permet d'imposer des arguments requises en entrée
798 self.__required_inputs["RequiredInputValues"]["mandatory"] = tuple( mandatory )
799 self.__required_inputs["RequiredInputValues"]["optional"] = tuple( optional )
# Merge a received parameter dict, re-validating every declared parameter.
801 def __setParameters(self, fromDico={}):
803 Permet de stocker les paramètres reçus dans le dictionnaire interne.
805 self._parameters.update( fromDico )
806 for k in self.__required_parameters.keys():
807 if k in fromDico.keys():
808 self._parameters[k] = self.setParameterValue(k,fromDico[k])
810 self._parameters[k] = self.setParameterValue(k)
811 logging.debug("%s %s : %s", self._name, self.__required_parameters[k]["message"], self._parameters[k])
# ==============================================================================
class AlgorithmAndParameters(object):
    """
    General interface action class for an algorithm and its parameters
    """
    # NOTE(review): the "def __init__(self," header and most of its signature
    # (presumably asAlgorithm, asDict, asScript arguments) are elided in this
    # sampled view; the following line belongs to that signature.
                 name = "GenericAlgorithm",
        self.__name = str(name)
        # NOTE(review): initialisations of self.__A and self.__P are elided here.
        self.__algorithm = {}
        self.__algorithmFile = None
        self.__algorithmName = None
        # Merge any explicit or scripted parameters into self.__P
        self.updateParameters( asDict, asScript )
        # When no algorithm object is given but a script is, read the
        # "Algorithm" variable from the script file.
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): the "else:" alternative is elided here.
        if __Algo is not None:
            self.__A = str(__Algo)
            self.__P.update( {"Algorithm":self.__A} )
        # Load and instantiate the selected elementary algorithm
        self.__setAlgorithm( self.__A )
def updateParameters(self, asDict=None, asScript=None):
    """
    Update the stored parameters, either from an explicit dictionary
    (asDict) or, failing that, from a script file exporting a
    "Parameters" variable (asScript).
    """
    if asDict is None and asScript is not None:
        __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
    else:
        __Dict = asDict
    #
    if __Dict is not None:
        self.__P.update( dict(__Dict) )
def executePythonScheme(self, asDictAO = None):
    "Run the assimilation calculation in pure Python"
    Operator.CM.clearCache()
    # NOTE(review): one line is elided here in this sampled view.
    if not isinstance(asDictAO, dict):
        raise ValueError("The objects for algorithm calculation have to be given together as a dictionnary, and they are not")
    # Unwrap each user object through its getO() accessor when present,
    # otherwise take the dictionary entry as given.
    if hasattr(asDictAO["Background"],"getO"): self.__Xb = asDictAO["Background"].getO()
    elif hasattr(asDictAO["CheckingPoint"],"getO"): self.__Xb = asDictAO["CheckingPoint"].getO()
    else: self.__Xb = None
    if hasattr(asDictAO["Observation"],"getO"): self.__Y = asDictAO["Observation"].getO()
    else: self.__Y = asDictAO["Observation"]
    if hasattr(asDictAO["ControlInput"],"getO"): self.__U = asDictAO["ControlInput"].getO()
    else: self.__U = asDictAO["ControlInput"]
    if hasattr(asDictAO["ObservationOperator"],"getO"): self.__HO = asDictAO["ObservationOperator"].getO()
    else: self.__HO = asDictAO["ObservationOperator"]
    if hasattr(asDictAO["EvolutionModel"],"getO"): self.__EM = asDictAO["EvolutionModel"].getO()
    else: self.__EM = asDictAO["EvolutionModel"]
    if hasattr(asDictAO["ControlModel"],"getO"): self.__CM = asDictAO["ControlModel"].getO()
    else: self.__CM = asDictAO["ControlModel"]
    # Error covariances are taken as given
    self.__B = asDictAO["BackgroundError"]
    self.__R = asDictAO["ObservationError"]
    self.__Q = asDictAO["EvolutionError"]
    #
    # Check size consistency of all the inputs before running
    self.__shape_validate()
    #
    self.__algorithm.run(
        # NOTE(review): most of the run() keyword arguments (Xb, Y, U, HO,
        # EM, CM, R, B, Q...) and the closing parenthesis are elided in this
        # sampled view -- confirm against the full file.
        Parameters = self.__P,
def executeYACSScheme(self, FileName=None):
    "Run the assimilation calculation through a YACS scheme"
    if FileName is None or not os.path.exists(FileName):
        raise ValueError("a YACS file name has to be given for YACS execution.\n")
    # NOTE(review): an "else:" introducing the following line is elided here.
    __file = os.path.abspath(FileName)
    logging.debug("The YACS file name is \"%s\"."%__file)
    # The full SALOME/YACS/ADAO environment is mandatory for this path
    if not PlatformInfo.has_salome or \
        not PlatformInfo.has_yacs or \
        not PlatformInfo.has_adao:
        raise ImportError("\n\n"+\
            "Unable to get SALOME, YACS or ADAO environnement variables.\n"+\
            "Please load the right environnement before trying to use it.\n")
    #
    # NOTE(review): the imports of pilot, SALOMERuntime and loader are elided
    # in this sampled view.
    SALOMERuntime.RuntimeSALOME_setRuntime()
    #
    r = pilot.getRuntime()
    xmlLoader = loader.YACSLoader()
    xmlLoader.registerProcCataLoader()
    # Register the schema's own catalog so its nodes resolve
    catalogAd = r.loadCatalog("proc", __file)
    r.addCatalog(catalogAd)
    # NOTE(review): a "try:" introducing the following load is elided here.
    p = xmlLoader.load(__file)
    except IOError as ex:
        print("The YACS XML schema file can not be loaded: %s"%(ex,))
    #
    logger = p.getLogger("parser")
    if not logger.isEmpty():
        print("The imported YACS XML schema has errors on parsing:")
        print(logger.getStr())
    # NOTE(review): a validity guard (presumably "if not p.isValid():") is
    # elided before the following two lines.
        print("The YACS XML schema is not valid and will not be executed:")
        print(p.getErrorReport())
    #
    info=pilot.LinkInfo(pilot.LinkInfo.ALL_DONT_STOP)
    p.checkConsistency(info)
    if info.areWarningsOrErrors():
        print("The YACS XML schema is not coherent and will not be executed:")
        print(info.getGlobalRepr())
    #
    e = pilot.ExecutorSwig()
    # NOTE(review): the executor run call is elided in this sampled view.
    if p.getEffectiveState() != pilot.DONE:
        print(p.getErrorReport())
def get(self, key = None):
    "Check the existence of a variable or parameter key and return it"
    if key in self.__algorithm:
        return self.__algorithm.get( key )
    elif key in self.__P:
        # NOTE(review): the return of this branch and the final fallback
        # are elided in this sampled view.
    # NOTE(review): the enclosing "def pop(self, k, d):"-style header is
    # elided before the following two lines.
    "Necessary for pickling"
    return self.__algorithm.pop(k, d)
def getAlgorithmRequiredParameters(self, noDetails=True):
    "Return the list of parameters required by the selected algorithm"
    __required = self.__algorithm.getRequiredParameters(noDetails)
    return __required
def setObserver(self, __V, __O, __I, __S):
    # Attach the observer function __O (with info __I and scheduler __S)
    # to the stored variable named __V of the current algorithm.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be build before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be set on a variable named %s which does not exist."%__V)
    # NOTE(review): an "else:" line is elided before the following call.
        self.__algorithm.StoredVariables[ __V ].setDataObserver(
            # NOTE(review): the Scheduler/HookFunction keyword arguments and
            # the closing parenthesis are elided in this sampled view.
            HookParameters = __I,
def removeObserver(self, __V, __O, __A = False):
    # Detach observer __O from the stored variable named __V; __A presumably
    # requests removing all observers -- TODO confirm against the full file.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        raise ValueError("No observer can be removed before choosing an algorithm.")
    if __V not in self.__algorithm:
        raise ValueError("An observer requires to be removed on a variable named %s which does not exist."%__V)
    # NOTE(review): an "else:" line is elided before the following call.
        return self.__algorithm.StoredVariables[ __V ].removeDataObserver(
            # NOTE(review): the call's keyword arguments and closing
            # parenthesis are elided in this sampled view.
def hasObserver(self, __V):
    # Tell whether an observer is attached to the stored variable __V.
    if self.__algorithm is None \
        or isinstance(self.__algorithm, dict) \
        or not hasattr(self.__algorithm,"StoredVariables"):
        # NOTE(review): this branch's body (presumably "return False") is
        # elided in this sampled view.
    if __V not in self.__algorithm:
        # NOTE(review): this branch's body (presumably "return False") is
        # elided in this sampled view.
    return self.__algorithm.StoredVariables[ __V ].hasDataObserver()
    # NOTE(review): a "def keys(self):"-style header is elided before the
    # following line, which returns all variable and parameter names.
    return list(self.__algorithm.keys()) + list(self.__P.keys())
def __contains__(self, key=None):
    "D.__contains__(k) -> True if D has a key k, else False"
    # A key may live either among the algorithm variables or the parameters.
    if key in self.__algorithm:
        return True
    return key in self.__P
1015 "x.__repr__() <==> repr(x)"
1016 return repr(self.__A)+", "+repr(self.__P)
1019 "x.__str__() <==> str(x)"
1020 return str(self.__A)+", "+str(self.__P)
def __setAlgorithm(self, choice = None ):
    """
    Select the algorithm to use to carry out the assimilation study. The
    argument is a character string referring to the name of an algorithm
    performing the operation on the fixed arguments.
    """
    # NOTE(review): the "if choice is None:" guard is elided before this raise.
    raise ValueError("Error: algorithm choice has to be given")
    if self.__algorithmName is not None:
        raise ValueError("Error: algorithm choice has already been done as \"%s\", it can't be changed."%self.__algorithmName)
    daDirectory = "daAlgorithms"
    #
    # Explicitly look for the complete file
    # ------------------------------------------
    # NOTE(review): the "module_path = None" initialisation is elided here.
    for directory in sys.path:
        if os.path.isfile(os.path.join(directory, daDirectory, str(choice)+'.py')):
            module_path = os.path.abspath(os.path.join(directory, daDirectory))
    if module_path is None:
        raise ImportError("No algorithm module named \"%s\" has been found in the search path.\n The search path is %s"%(choice, sys.path))
    #
    # Import the complete file as a module
    # ------------------------------------------
    # NOTE(review): a "try:" line is elided before the following block.
    # Temporarily put the algorithm directory first on sys.path, then restore.
    sys_path_tmp = sys.path ; sys.path.insert(0,module_path)
    self.__algorithmFile = __import__(str(choice), globals(), locals(), [])
    if not hasattr(self.__algorithmFile, "ElementaryAlgorithm"):
        raise ImportError("this module does not define a valid elementary algorithm.")
    self.__algorithmName = str(choice)
    sys.path = sys_path_tmp ; del sys_path_tmp
    except ImportError as e:
        raise ImportError("The module named \"%s\" was found, but is incorrect at the import stage.\n The import error message is: %s"%(choice,e))
    #
    # Instantiate an object of the elementary type defined in the file
    # -------------------------------------------------
    self.__algorithm = self.__algorithmFile.ElementaryAlgorithm()
    # NOTE(review): a final return is presumably elided in this view.
def __shape_validate(self):
    """
    Validate the mutual consistency of the sizes of the variables and of
    the matrices, when there are any.
    """
    # Each input is reduced to a canonical shape tuple; absent inputs get a
    # zero shape so that the later comparisons degrade gracefully.
    if self.__Xb is None: __Xb_shape = (0,)
    elif hasattr(self.__Xb,"size"): __Xb_shape = (self.__Xb.size,)
    elif hasattr(self.__Xb,"shape"):
        if isinstance(self.__Xb.shape, tuple): __Xb_shape = self.__Xb.shape
        else: __Xb_shape = self.__Xb.shape()
    else: raise TypeError("The background (Xb) has no attribute of shape: problem !")
    #
    if self.__Y is None: __Y_shape = (0,)
    elif hasattr(self.__Y,"size"): __Y_shape = (self.__Y.size,)
    elif hasattr(self.__Y,"shape"):
        if isinstance(self.__Y.shape, tuple): __Y_shape = self.__Y.shape
        else: __Y_shape = self.__Y.shape()
    else: raise TypeError("The observation (Y) has no attribute of shape: problem !")
    #
    if self.__U is None: __U_shape = (0,)
    elif hasattr(self.__U,"size"): __U_shape = (self.__U.size,)
    elif hasattr(self.__U,"shape"):
        if isinstance(self.__U.shape, tuple): __U_shape = self.__U.shape
        else: __U_shape = self.__U.shape()
    else: raise TypeError("The control (U) has no attribute of shape: problem !")
    #
    if self.__B is None: __B_shape = (0,0)
    elif hasattr(self.__B,"shape"):
        if isinstance(self.__B.shape, tuple): __B_shape = self.__B.shape
        else: __B_shape = self.__B.shape()
    else: raise TypeError("The a priori errors covariance matrix (B) has no attribute of shape: problem !")
    #
    if self.__R is None: __R_shape = (0,0)
    elif hasattr(self.__R,"shape"):
        if isinstance(self.__R.shape, tuple): __R_shape = self.__R.shape
        else: __R_shape = self.__R.shape()
    else: raise TypeError("The observation errors covariance matrix (R) has no attribute of shape: problem !")
    #
    if self.__Q is None: __Q_shape = (0,0)
    elif hasattr(self.__Q,"shape"):
        if isinstance(self.__Q.shape, tuple): __Q_shape = self.__Q.shape
        else: __Q_shape = self.__Q.shape()
    else: raise TypeError("The evolution errors covariance matrix (Q) has no attribute of shape: problem !")
    #
    # Operators are stored as dicts with a "Direct" entry; an empty or plain
    # dict counts as absent.
    if len(self.__HO) == 0: __HO_shape = (0,0)
    elif isinstance(self.__HO, dict): __HO_shape = (0,0)
    elif hasattr(self.__HO["Direct"],"shape"):
        if isinstance(self.__HO["Direct"].shape, tuple): __HO_shape = self.__HO["Direct"].shape
        else: __HO_shape = self.__HO["Direct"].shape()
    else: raise TypeError("The observation operator (H) has no attribute of shape: problem !")
    #
    if len(self.__EM) == 0: __EM_shape = (0,0)
    elif isinstance(self.__EM, dict): __EM_shape = (0,0)
    elif hasattr(self.__EM["Direct"],"shape"):
        if isinstance(self.__EM["Direct"].shape, tuple): __EM_shape = self.__EM["Direct"].shape
        else: __EM_shape = self.__EM["Direct"].shape()
    else: raise TypeError("The evolution model (EM) has no attribute of shape: problem !")
    #
    if len(self.__CM) == 0: __CM_shape = (0,0)
    elif isinstance(self.__CM, dict): __CM_shape = (0,0)
    elif hasattr(self.__CM["Direct"],"shape"):
        if isinstance(self.__CM["Direct"].shape, tuple): __CM_shape = self.__CM["Direct"].shape
        else: __CM_shape = self.__CM["Direct"].shape()
    else: raise TypeError("The control model (CM) has no attribute of shape: problem !")
    #
    # Condition checking
    # ---------------------------
    if not( len(__Xb_shape) == 1 or min(__Xb_shape) == 1 ):
        raise ValueError("Shape characteristic of background (Xb) is incorrect: \"%s\"."%(__Xb_shape,))
    if not( len(__Y_shape) == 1 or min(__Y_shape) == 1 ):
        raise ValueError("Shape characteristic of observation (Y) is incorrect: \"%s\"."%(__Y_shape,))
    #
    # Covariances and the evolution operator must be square
    if not( min(__B_shape) == max(__B_shape) ):
        raise ValueError("Shape characteristic of a priori errors covariance matrix (B) is incorrect: \"%s\"."%(__B_shape,))
    if not( min(__R_shape) == max(__R_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) is incorrect: \"%s\"."%(__R_shape,))
    if not( min(__Q_shape) == max(__Q_shape) ):
        raise ValueError("Shape characteristic of evolution errors covariance matrix (Q) is incorrect: \"%s\"."%(__Q_shape,))
    if not( min(__EM_shape) == max(__EM_shape) ):
        raise ValueError("Shape characteristic of evolution operator (EM) is incorrect: \"%s\"."%(__EM_shape,))
    #
    # Cross-compatibility of the observation operator with X, Y, B, R
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and state (X) \"%s\" are incompatible."%(__HO_shape,__Xb_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and not( __HO_shape[0] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation (Y) \"%s\" are incompatible."%(__HO_shape,__Y_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__B) > 0 and not( __HO_shape[1] == __B_shape[0] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and a priori errors covariance matrix (B) \"%s\" are incompatible."%(__HO_shape,__B_shape))
    if len(self.__HO) > 0 and not isinstance(self.__HO, dict) and len(self.__R) > 0 and not( __HO_shape[0] == __R_shape[1] ):
        raise ValueError("Shape characteristic of observation operator (H) \"%s\" and observation errors covariance matrix (R) \"%s\" are incompatible."%(__HO_shape,__R_shape))
    #
    if self.__B is not None and len(self.__B) > 0 and not( __B_shape[1] == max(__Xb_shape) ):
        # EnsembleBlue accepts a background given as a series of members:
        # reshape it into a persistent vector of members.
        if self.__algorithmName in ["EnsembleBlue",]:
            asPersistentVector = self.__Xb.reshape((-1,min(__B_shape)))
            self.__Xb = Persistence.OneVector("Background", basetype=numpy.matrix)
            for member in asPersistentVector:
                self.__Xb.store( numpy.matrix( numpy.ravel(member), numpy.float ).T )
            __Xb_shape = min(__B_shape)
        # NOTE(review): an "else:" line is elided before the following raise.
            raise ValueError("Shape characteristic of a priori errors covariance matrix (B) \"%s\" and background (Xb) \"%s\" are incompatible."%(__B_shape,__Xb_shape))
    #
    if self.__R is not None and len(self.__R) > 0 and not( __R_shape[1] == max(__Y_shape) ):
        raise ValueError("Shape characteristic of observation errors covariance matrix (R) \"%s\" and observation (Y) \"%s\" are incompatible."%(__R_shape,__Y_shape))
    #
    if self.__EM is not None and len(self.__EM) > 0 and not isinstance(self.__EM, dict) and not( __EM_shape[1] == max(__Xb_shape) ):
        raise ValueError("Shape characteristic of evolution model (EM) \"%s\" and state (X) \"%s\" are incompatible."%(__EM_shape,__Xb_shape))
    #
    if self.__CM is not None and len(self.__CM) > 0 and not isinstance(self.__CM, dict) and not( __CM_shape[1] == max(__U_shape) ):
        raise ValueError("Shape characteristic of control model (CM) \"%s\" and control (U) \"%s\" are incompatible."%(__CM_shape,__U_shape))
    #
    # "Bounds" must provide one pair per state component
    if ("Bounds" in self.__P) \
        and (isinstance(self.__P["Bounds"], list) or isinstance(self.__P["Bounds"], tuple)) \
        and (len(self.__P["Bounds"]) != max(__Xb_shape)):
        raise ValueError("The number \"%s\" of bound pairs for the state (X) components is different of the size \"%s\" of the state itself." \
            %(len(self.__P["Bounds"]),max(__Xb_shape)))
    # NOTE(review): a final return is presumably elided in this view.
# ==============================================================================
class RegulationAndParameters(object):
    """
    General interface action class for regulation and its parameters
    """
    # NOTE(review): the "def __init__(self," header and several signature
    # arguments (presumably asAlgorithm, asDict, asScript) are elided in this
    # sampled view; the following line belongs to that signature.
                 name = "GenericRegulation",
        self.__name = str(name)
        # NOTE(review): initialisations of self.__P and self.__A elided here.
        # Algorithm choice: explicit object first, otherwise read from script
        if asAlgorithm is None and asScript is not None:
            __Algo = ImportFromScript(asScript).getvalue( "Algorithm" )
        # NOTE(review): an "else:" line is elided before the following line.
            __Algo = asAlgorithm
        #
        # Parameters: explicit dictionary first, otherwise read from script
        if asDict is None and asScript is not None:
            __Dict = ImportFromScript(asScript).getvalue( self.__name, "Parameters" )
        # NOTE(review): the "else:" alternative is elided here.
        if __Dict is not None:
            self.__P.update( dict(__Dict) )
        #
        if __Algo is not None:
            self.__P.update( {"Algorithm":self.__A} )

    def get(self, key = None):
        "Check the existence of a variable or parameter key and return it"
        # NOTE(review): a guard (presumably "if key in self.__P:") is elided
        # before the following return.
        return self.__P[key]
# ==============================================================================
class DataObserver(object):
    """
    General observer-type interface class
    """
    # NOTE(review): the "def __init__(self," header and most of its signature
    # (presumably onVariable, withInfo, asString, asTemplate, asScript,
    # asObsObject, withAlgo, scheduledBy) are elided in this sampled view.
                 name = "GenericObserver",
        self.__name = str(name)
        # NOTE(review): a few initialisation lines are elided here.
        # Normalise the observed variable name(s) into tuple self.__V and the
        # matching info strings into tuple self.__I.
        if onVariable is None:
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names, not over None.")
        elif type(onVariable) in (tuple, list):
            self.__V = tuple(map( str, onVariable ))
            if withInfo is None:
                # NOTE(review): this branch's body and the "else:" line are
                # elided before the following assignment.
            self.__I = (str(withInfo),)*len(self.__V)
        elif isinstance(onVariable, str):
            self.__V = (onVariable,)
            if withInfo is None:
                self.__I = (onVariable,)
            # NOTE(review): an "else:" line is elided here.
                self.__I = (str(withInfo),)
        # NOTE(review): a final "else:" line is elided before this raise.
            raise ValueError("setting an observer has to be done over a variable name or a list of variable names.")
        #
        # Resolve the observer function body: explicit string, named template,
        # or script content, in that order of priority.
        if asString is not None:
            __FunctionText = asString
        elif (asTemplate is not None) and (asTemplate in Templates.ObserverTemplates):
            __FunctionText = Templates.ObserverTemplates[asTemplate]
        elif asScript is not None:
            __FunctionText = ImportFromScript(asScript).getstring()
        # NOTE(review): the "else:" alternative is elided here.
        __Function = ObserverF(__FunctionText)
        #
        if asObsObject is not None:
            self.__O = asObsObject
        # NOTE(review): an "else:" line is elided here.
            self.__O = __Function.getfunc()
        #
        # Register the observer on every requested variable of the algorithm
        for k in range(len(self.__V)):
            # NOTE(review): the assignments of ename/einfo from self.__V and
            # self.__I are elided in this sampled view.
            if ename not in withAlgo:
                raise ValueError("An observer is asked to be set on a variable named %s which does not exist."%ename)
            # NOTE(review): an "else:" line is elided here.
                withAlgo.setObserver(ename, self.__O, einfo, scheduledBy)

    # NOTE(review): the "def __repr__(self):" header is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)+"\n"+repr(self.__O)

    # NOTE(review): the "def __str__(self):" header is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__V)+"\n"+str(self.__O)
# ==============================================================================
class State(object):
    """
    General state-type (vector) interface class
    """
    # NOTE(review): the "def __init__(self," header and part of its signature
    # (presumably asVector, asScript, asDataFile, colNames, colMajor,
    # scheduledBy) are elided in this sampled view.
                 name = "GenericVector",
                 asPersistentVector = None,
                 toBeChecked = False,
        """
        Define a vector:
        - asVector: data input, as anything compatible with the numpy.matrix
          constructor, or "True" if the input comes from a script.
        - asPersistentVector: data input, as a series of vectors compatible
          with the numpy.matrix constructor, or as a Persistence-type object,
          or "True" if the input comes from a script.
        - asScript: if a valid script is given containing a variable named
          "name", the variable is of type "asVector" (default) or
          "asPersistentVector" depending on which of those flags is set.
        - asDataFile: if one or more valid files are given containing values
          in columns, themselves named "colNames" (if no column name is
          given, a column named "name" is looked up), the columns are taken
          and stored row after row (colMajor=False) or column after column
          (colMajor=True). The resulting variable is of type "asVector"
          (default) or "asPersistentVector" depending on which of those
          flags is set.
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): initialisations (presumably self.__V, self.__T) are
        # elided here.
        self.__is_vector = False
        self.__is_series = False
        #
        if asScript is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                __Series = ImportFromScript(asScript).getvalue( self.__name )
            # NOTE(review): an "else:" line is elided here.
                __Vector = ImportFromScript(asScript).getvalue( self.__name )
        elif asDataFile is not None:
            __Vector, __Series = None, None
            if asPersistentVector:
                if colNames is not None:
                    __Series = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is elided here.
                    __Series = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # npz archives arrive transposed with respect to text files,
                # so the two orientations need opposite corrections
                if bool(colMajor) and not ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
                elif not bool(colMajor) and ImportFromFile(asDataFile).getformat() == "application/numpy.npz":
                    __Series = numpy.transpose(__Series)
            # NOTE(review): an "else:" line is elided here.
                if colNames is not None:
                    __Vector = ImportFromFile(asDataFile).getvalue( colNames )[1]
                # NOTE(review): an "else:" line is elided here.
                    __Vector = ImportFromFile(asDataFile).getvalue( [self.__name,] )[1]
                # NOTE(review): an "if bool(colMajor):"-style guard is elided
                # before the Fortran-order flattening below.
                    __Vector = numpy.ravel(__Vector, order = "F")
                # NOTE(review): an "else:" line is elided here.
                    __Vector = numpy.ravel(__Vector, order = "C")
        # NOTE(review): a final "else:" line is elided here.
            __Vector, __Series = asVector, asPersistentVector
        #
        if __Vector is not None:
            self.__is_vector = True
            self.__V = numpy.matrix( numpy.asmatrix(__Vector).A1, numpy.float ).T
            self.shape = self.__V.shape
            self.size = self.__V.size
        elif __Series is not None:
            self.__is_series = True
            if isinstance(__Series, (tuple, list, numpy.ndarray, numpy.matrix, str)):
                self.__V = Persistence.OneVector(self.__name, basetype=numpy.matrix)
                # NOTE(review): eval() on a string input -- only safe when the
                # series string comes from a trusted source.
                if isinstance(__Series, str): __Series = eval(__Series)
                for member in __Series:
                    self.__V.store( numpy.matrix( numpy.asmatrix(member).A1, numpy.float ).T )
            # NOTE(review): the "else:" branch (presumably storing __Series
            # directly) is elided in this sampled view.
            # Normalise the shape attribute to a 2D (rows, cols) tuple
            if isinstance(self.__V.shape, (tuple, list)):
                self.shape = self.__V.shape
            # NOTE(review): an "else:" line is elided here.
                self.shape = self.__V.shape()
            if len(self.shape) == 1:
                self.shape = (self.shape[0],1)
            self.size = self.shape[0] * self.shape[1]
        # NOTE(review): a final "else:" line is elided before this raise.
            raise ValueError("The %s object is improperly defined, it requires at minima either a vector, a list/tuple of vectors or a persistent object. Please check your vector input."%self.__name)
        #
        if scheduledBy is not None:
            self.__T = scheduledBy

    def getO(self, withScheduler=False):
        # Return the stored object, optionally with its scheduler.
        # NOTE(review): a guard (presumably "if withScheduler:") is elided
        # before the following return.
        return self.__V, self.__T
        elif self.__T is None:
        # NOTE(review): the remaining branches are elided in this view.

    # NOTE(review): the "def isvector(self):" header is elided here.
        "Internal type check"
        return self.__is_vector

    # NOTE(review): the "def isseries(self):" header is elided here.
        "Internal type check"
        return self.__is_series

    # NOTE(review): the "def __repr__(self):" header is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__V)

    # NOTE(review): the "def __str__(self):" header is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__V)
# ==============================================================================
class Covariance(object):
    """
    General covariance-type interface class
    """
    # NOTE(review): the "def __init__(self," header and part of the signature
    # (presumably asCovObject, asScript) are elided in this sampled view.
                 name = "GenericCovariance",
                 asCovariance = None,
                 asEyeByScalar = None,
                 asEyeByVector = None,
                 toBeChecked = False,
        """
        Define a covariance:
        - asCovariance: data input, as a matrix compatible with the
          numpy.matrix constructor
        - asEyeByScalar: data input as a single variance scalar, multiplying
          an identity correlation matrix, so no matrix is explicitly given
        - asEyeByVector: data input as a single variance vector, to be put on
          the diagonal of a correlation matrix, so no matrix is explicitly
          given
        - asCovObject: data input as a Python object, providing the mandatory
          methods "getT", "getI", "diag", "trace", "__add__", "__sub__",
          "__neg__", "__mul__", "__rmul__" and optionally "shape", "size",
          "cholesky", "choleskyI", "asfullmatrix", "__repr__", "__str__"
        - toBeChecked: boolean stating whether the SPD character of the full
          matrix has to be verified
        """
        self.__name = str(name)
        self.__check = bool(toBeChecked)
        # NOTE(review): some initialisations are elided here.
        self.__is_scalar = False
        self.__is_vector = False
        self.__is_matrix = False
        self.__is_object = False
        #
        if asScript is not None:
            __Matrix, __Scalar, __Vector, __Object = None, None, None, None
            # NOTE(review): the guards selecting one of the following four
            # alternatives (presumably "if asEyeByScalar:" etc.) are elided
            # in this sampled view.
            __Scalar = ImportFromScript(asScript).getvalue( self.__name )
            __Vector = ImportFromScript(asScript).getvalue( self.__name )
            __Object = ImportFromScript(asScript).getvalue( self.__name )
            __Matrix = ImportFromScript(asScript).getvalue( self.__name )
        # NOTE(review): an "else:" line is elided here.
            __Matrix, __Scalar, __Vector, __Object = asCovariance, asEyeByScalar, asEyeByVector, asCovObject
        #
        # Store the most economical representation and expose shape/size as
        # those of the equivalent full matrix.
        if __Scalar is not None:
            if numpy.matrix(__Scalar).size != 1:
                raise ValueError(' The diagonal multiplier given to define a sparse matrix is not a unique scalar value.\n Its actual measured size is %i. Please check your scalar input.'%numpy.matrix(__Scalar).size)
            self.__is_scalar = True
            self.__C = numpy.abs( float(__Scalar) )
            # NOTE(review): the scalar case's shape/size assignments are
            # elided here.
        elif __Vector is not None:
            self.__is_vector = True
            self.__C = numpy.abs( numpy.array( numpy.ravel( numpy.matrix(__Vector, float ) ) ) )
            self.shape = (self.__C.size,self.__C.size)
            self.size = self.__C.size**2
        elif __Matrix is not None:
            self.__is_matrix = True
            self.__C = numpy.matrix( __Matrix, float )
            self.shape = self.__C.shape
            self.size = self.__C.size
        elif __Object is not None:
            self.__is_object = True
            # NOTE(review): the assignment of self.__C from __Object is
            # elided here.
            for at in ("getT","getI","diag","trace","__add__","__sub__","__neg__","__mul__","__rmul__"):
                if not hasattr(self.__C,at):
                    raise ValueError("The matrix given for %s as an object has no attribute \"%s\". Please check your object input."%(self.__name,at))
            if hasattr(self.__C,"shape"):
                self.shape = self.__C.shape
            # NOTE(review): the "else:" alternative is elided here.
            if hasattr(self.__C,"size"):
                self.size = self.__C.size
            # NOTE(review): the "else:" alternative and the final branch are
            # elided; the original keeps the following raise commented out.
        # raise ValueError("The %s covariance matrix has to be specified either as a matrix, a vector for its diagonal or a scalar multiplying an identity matrix."%self.__name)
        # NOTE(review): a call to self.__validate() is presumably elided here.

    def __validate(self):
        # Validate the internal representation: presence, squareness,
        # and (symmetric) positive-definiteness.
        if self.__C is None:
            raise UnboundLocalError("%s covariance matrix value has not been set!"%(self.__name,))
        if self.ismatrix() and min(self.shape) != max(self.shape):
            raise ValueError("The given matrix for %s is not a square one, its shape is %s. Please check your matrix input."%(self.__name,self.shape))
        if self.isobject() and min(self.shape) != max(self.shape):
            raise ValueError("The matrix given for \"%s\" is not a square one, its shape is %s. Please check your object input."%(self.__name,self.shape))
        if self.isscalar() and self.__C <= 0:
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your scalar input %s."%(self.__name,self.__C))
        if self.isvector() and (self.__C <= 0).any():
            raise ValueError("The \"%s\" covariance matrix is not positive-definite. Please check your vector input."%(self.__name,))
        # A Cholesky factorisation doubles as the SPD check for full matrices
        if self.ismatrix() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): a "try:" line is elided here.
            L = numpy.linalg.cholesky( self.__C )
            # NOTE(review): the exception-handler line is elided here.
            raise ValueError("The %s covariance matrix is not symmetric positive-definite. Please check your matrix input."%(self.__name,))
        if self.isobject() and (self.__check or logging.getLogger().level < logging.WARNING):
            # NOTE(review): a "try:" line is elided here.
            L = self.__C.cholesky()
            # NOTE(review): the exception-handler line is elided here.
            raise ValueError("The %s covariance object is not symmetric positive-definite. Please check your matrix input."%(self.__name,))

    # NOTE(review): the "def isscalar(self):" header is elided here.
        "Internal type check"
        return self.__is_scalar

    # NOTE(review): the "def isvector(self):" header is elided here.
        "Internal type check"
        return self.__is_vector

    # NOTE(review): the "def ismatrix(self):" header is elided here.
        "Internal type check"
        return self.__is_matrix

    # NOTE(review): the "def isobject(self):" header is elided here.
        "Internal type check"
        return self.__is_object

    # NOTE(review): the "def getI(self):" header and its first guard
    # (presumably "if self.ismatrix():") are elided before this line.
        return Covariance(self.__name+"I", asCovariance = self.__C.I )
        elif self.isvector():
            return Covariance(self.__name+"I", asEyeByVector = 1. / self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"I", asEyeByScalar = 1. / self.__C )
        elif self.isobject():
            return Covariance(self.__name+"I", asCovObject = self.__C.getI() )
        else:
            return None # Indispensable

    # NOTE(review): the "def getT(self):" header and its first guard
    # (presumably "if self.ismatrix():") are elided before this line.
        return Covariance(self.__name+"T", asCovariance = self.__C.T )
        elif self.isvector():
            return Covariance(self.__name+"T", asEyeByVector = self.__C )
        elif self.isscalar():
            return Covariance(self.__name+"T", asEyeByScalar = self.__C )
        elif self.isobject():
            return Covariance(self.__name+"T", asCovObject = self.__C.getT() )

    def cholesky(self):
        "Cholesky decomposition"
        # NOTE(review): an "if self.ismatrix():" style guard is elided here.
        return Covariance(self.__name+"C", asCovariance = numpy.linalg.cholesky(self.__C) )
        elif self.isvector():
            return Covariance(self.__name+"C", asEyeByVector = numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"C", asEyeByScalar = numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"cholesky"):
            return Covariance(self.__name+"C", asCovObject = self.__C.cholesky() )

    def choleskyI(self):
        "Inverse of the Cholesky decomposition"
        # NOTE(review): an "if self.ismatrix():" style guard is elided here.
        return Covariance(self.__name+"H", asCovariance = numpy.linalg.cholesky(self.__C).I )
        elif self.isvector():
            return Covariance(self.__name+"H", asEyeByVector = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isscalar():
            return Covariance(self.__name+"H", asEyeByScalar = 1.0 / numpy.sqrt( self.__C ) )
        elif self.isobject() and hasattr(self.__C,"choleskyI"):
            return Covariance(self.__name+"H", asCovObject = self.__C.choleskyI() )

    def diag(self, msize=None):
        "Diagonal of the matrix"
        # NOTE(review): an "if self.ismatrix():" style guard is elided here.
        return numpy.diag(self.__C)
        elif self.isvector():
            # NOTE(review): this branch's return is elided here.
        elif self.isscalar():
            # NOTE(review): an "if msize is None:" guard is elided before
            # the following raise.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is elided here.
            return self.__C * numpy.ones(int(msize))
        elif self.isobject():
            return self.__C.diag()

    def asfullmatrix(self, msize=None):
        # NOTE(review): the docstring and the matrix-case lines are elided
        # before the following branch.
        elif self.isvector():
            return numpy.matrix( numpy.diag(self.__C), float )
        elif self.isscalar():
            # NOTE(review): an "if msize is None:" guard is elided before
            # the following raise.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is elided here.
            return numpy.matrix( self.__C * numpy.eye(int(msize)), float )
        elif self.isobject() and hasattr(self.__C,"asfullmatrix"):
            return self.__C.asfullmatrix()

    def trace(self, msize=None):
        "Trace of the matrix"
        # NOTE(review): an "if self.ismatrix():" style guard is elided here.
        return numpy.trace(self.__C)
        elif self.isvector():
            return float(numpy.sum(self.__C))
        elif self.isscalar():
            # NOTE(review): an "if msize is None:" guard is elided before
            # the following raise.
            raise ValueError("the size of the %s covariance matrix has to be given in case of definition as a scalar over the diagonal."%(self.__name,))
            # NOTE(review): an "else:" line is elided here.
            return self.__C * int(msize)
        elif self.isobject():
            return self.__C.trace()

    # NOTE(review): the "def __repr__(self):" header is elided here.
        "x.__repr__() <==> repr(x)"
        return repr(self.__C)

    # NOTE(review): the "def __str__(self):" header is elided here.
        "x.__str__() <==> str(x)"
        return str(self.__C)
def __add__(self, other):
    "x.__add__(y) <==> x+y"
    if self.ismatrix() or self.isobject():
        return self.__C + numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Add the (virtual) diagonal onto the operand in place: the flat
        # slice with stride ncols+1 walks the main diagonal of the view.
        _M = numpy.asarray(other)
        _M.reshape(_M.size)[::_M.shape[1]+1] += self.__C
        return numpy.asmatrix(_M)
def __radd__(self, other):
    "x.__radd__(y) <==> y+x"
    # Right-hand addition is deliberately unsupported for every operand type.
    raise NotImplementedError("%s covariance matrix __radd__ method not available for %s type!"%(self.__name,type(other)))
def __sub__(self, other):
    "x.__sub__(y) <==> x-y"
    if self.ismatrix() or self.isobject():
        return self.__C - numpy.asmatrix(other)
    elif self.isvector() or self.isscalar():
        # Replace the operand's main diagonal, in place, by (C - diagonal):
        # the flat slice with stride ncols+1 addresses the diagonal entries.
        _M = numpy.asarray(other)
        _D = _M.reshape(_M.size)[::_M.shape[1]+1]
        _D[...] = self.__C - _D
        return numpy.asmatrix(_M)
def __rsub__(self, other):
    "x.__rsub__(y) <==> y-x"
    # Right-hand subtraction is deliberately unsupported for every operand type.
    raise NotImplementedError("%s covariance matrix __rsub__ method not available for %s type!"%(self.__name,type(other)))
1666 "x.__neg__() <==> -x"
1669 def __mul__(self, other):
1670 "x.__mul__(y) <==> x*y"
1671 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1672 return self.__C * other
1673 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1674 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1675 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1676 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1677 return self.__C * numpy.asmatrix(other)
1679 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.asmatrix(other).shape,self.__name))
1680 elif self.isvector() and isinstance(other, (list, numpy.matrix, numpy.ndarray, tuple)):
1681 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1682 return numpy.asmatrix(self.__C * numpy.ravel(other)).T
1683 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1684 return numpy.asmatrix((self.__C * (numpy.asarray(other).transpose())).transpose())
1686 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(self.shape,numpy.ravel(other).shape,self.__name))
1687 elif self.isscalar() and isinstance(other,numpy.matrix):
1688 return self.__C * other
1689 elif self.isscalar() and isinstance(other, (list, numpy.ndarray, tuple)):
1690 if len(numpy.asarray(other).shape) == 1 or numpy.asarray(other).shape[1] == 1 or numpy.asarray(other).shape[0] == 1:
1691 return self.__C * numpy.asmatrix(numpy.ravel(other)).T
1693 return self.__C * numpy.asmatrix(other)
1694 elif self.isobject():
1695 return self.__C.__mul__(other)
1697 raise NotImplementedError("%s covariance matrix __mul__ method not available for %s type!"%(self.__name,type(other)))
1699 def __rmul__(self, other):
1700 "x.__rmul__(y) <==> y*x"
1701 if self.ismatrix() and isinstance(other, (int, numpy.matrix, float)):
1702 return other * self.__C
1703 elif self.ismatrix() and isinstance(other, (list, numpy.ndarray, tuple)):
1704 if numpy.ravel(other).size == self.shape[1]: # Vecteur
1705 return numpy.asmatrix(numpy.ravel(other)) * self.__C
1706 elif numpy.asmatrix(other).shape[0] == self.shape[1]: # Matrice
1707 return numpy.asmatrix(other) * self.__C
1709 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.asmatrix(other).shape,self.shape,self.__name))
1710 elif self.isvector() and isinstance(other,numpy.matrix):
1711 if numpy.ravel(other).size == self.shape[0]: # Vecteur
1712 return numpy.asmatrix(numpy.ravel(other) * self.__C)
1713 elif numpy.asmatrix(other).shape[1] == self.shape[0]: # Matrice
1714 return numpy.asmatrix(numpy.array(other) * self.__C)
1716 raise ValueError("operands could not be broadcast together with shapes %s %s in %s matrix"%(numpy.ravel(other).shape,self.shape,self.__name))
1717 elif self.isscalar() and isinstance(other,numpy.matrix):
1718 return other * self.__C
1719 elif self.isobject():
1720 return self.__C.__rmul__(other)
1722 raise NotImplementedError("%s covariance matrix __rmul__ method not available for %s type!"%(self.__name,type(other)))
1725 "x.__len__() <==> len(x)"
1726 return self.shape[0]
1728 # ==============================================================================
class ObserverF(object):
    """
    Build an observer function from its source text.
    """
    def __init__(self, corps=""):
        # Raw Python source executed each time the observer fires.
        self.__corps = corps
    def func(self,var,info):
        "Observer function: runs the stored source with var/info in scope"
        # NOTE(review): exec of user-provided text — callers must only pass
        # trusted observer scripts.
        exec(self.__corps)
    def getfunc(self):
        "Return the bound function pointer held by this object"
        return self.func
1742 # ==============================================================================
class CaseLogger(object):
    """
    Record the sequence of commands that build a calculation case, and
    dump/load it through pluggable viewers and loaders.
    """
    def __init__(self, __name="", __objname="case", __addViewers=None, __addLoaders=None):
        self.__name      = str(__name)
        self.__objname   = str(__objname)
        self.__logSerie  = []       # ordered list of registered commands
        self.__switchoff = False    # when True, registration is suspended
        self.__viewers = {
            "TUI" :Interfaces._TUIViewer,
            "SCD" :Interfaces._SCDViewer,
            "YACS":Interfaces._YACSViewer,
            }
        self.__loaders = {
            "TUI" :Interfaces._TUIViewer,
            "COM" :Interfaces._COMViewer,
            }
        if __addViewers is not None:
            self.__viewers.update(dict(__addViewers))
        if __addLoaders is not None:
            self.__loaders.update(dict(__addLoaders))

    def register(self, __command=None, __keys=None, __local=None, __pre=None, __switchoff=False):
        "Record one individual command"
        if __command is not None and __keys is not None and __local is not None and not self.__switchoff:
            # Drop the implicit "self" argument before logging the call.
            if "self" in __keys: __keys.remove("self")
            self.__logSerie.append( (str(__command), __keys, __local, __pre, __switchoff) )
            if __switchoff:
                # This command suspends registration of the following ones.
                self.__switchoff = True
        if not __switchoff:
            self.__switchoff = False

    def dump(self, __filename=None, __format="TUI", __upa=""):
        "Normalized dump of the recorded commands"
        if __format in self.__viewers:
            __formater = self.__viewers[__format](self.__name, self.__objname, self.__logSerie)
        else:
            raise ValueError("Dumping as \"%s\" is not available"%__format)
        return __formater.dump(__filename, __upa)

    def load(self, __filename=None, __content=None, __object=None, __format="TUI"):
        "Normalized loading of commands"
        if __format in self.__loaders:
            __formater = self.__loaders[__format]()
        else:
            raise ValueError("Loading as \"%s\" is not available"%__format)
        return __formater.load(__filename, __content, __object)
1792 # ==============================================================================
def MultiFonction( __xserie, _sFunction = lambda x: x ):
    """
    For an ordered list of input vectors, return the ordered list of the
    values of the given function evaluated on each input.

    Raises ValueError when the input is not iterable.
    """
    if not PlatformInfo.isIterable( __xserie ):
        # Fixed typo in the message: "unkown" -> "unknown".
        raise ValueError("MultiFonction not iterable unknown input type: %s"%(type(__xserie),))
    #
    # Sequential evaluation, preserving the input ordering.
    return [ _sFunction( __xvalue ) for __xvalue in __xserie ]
1807 # ==============================================================================
def CostFunction3D(_x,
                   _Hm  = None,  # To simulate Hm(x) : HO["Direct"].appliedTo
                   _HmX = None,  # Simulation of Hm(x) already available
                   _arg = None,  # Extra arguments for Hm, as a tuple
                   _BI  = None,  # Inverse of the background error covariance
                   _RI  = None,  # Inverse of the observation error covariance
                   _Xb  = None,  # Background state
                   _Y   = None,  # Observations
                   _SIV = False, # To be removed for 8.0
                   _SSC = [],    # self._parameters["StoreSupplementaryCalculations"]
                   _nPS = 0,     # nbPreviousSteps
                   _QM  = "DA",  # QualityMeasure
                   _SSV = {},    # In/out: self.StoredVariables
                   _fRt = False, # Whether to return the extended output
                   _sSc = True,  # Whether to store the supplementary calculations
                  ):
    """
    General-purpose cost function for static/3D algorithms: 3DVAR, BLUE
    and derivatives, Kalman and derivatives, LeastSquares, SamplingTest,
    PSO, SA, Tabu, DFO, QuantileRegression.
    """
    # NOTE(review): the mutable defaults _SSC/_SSV are shared across calls;
    # kept as-is for interface compatibility — callers pass their own.
    if not _sSc:
        _SIV = False
        _SSC = {}
    else:
        for k in ["CostFunctionJ",
                  "CostFunctionJb",
                  "CostFunctionJo",
                  "CurrentOptimum",
                  "CurrentState",
                  "IndexOfOptimum",
                  "SimulatedObservationAtCurrentOptimum",
                  "SimulatedObservationAtCurrentState",
                 ]:
            if k not in _SSV:
                _SSV[k] = []
            if hasattr(_SSV[k],"store"):
                _SSV[k].append = _SSV[k].store # use "append" instead of "store"
    #
    _X  = numpy.asmatrix(numpy.ravel( _x )).T
    if _SIV or "CurrentState" in _SSC or "CurrentOptimum" in _SSC:
        _SSV["CurrentState"].append( _X )
    #
    # Reuse the provided simulation when available, otherwise evaluate Hm.
    if _HmX is not None:
        _HX = _HmX
    else:
        if _Hm is None:
            raise ValueError("COSTFUNCTION3D Operator has to be defined.")
        if _arg is None:
            _HX = _Hm( _X )
        else:
            _HX = _Hm( _X, *_arg )
    _HX = numpy.asmatrix(numpy.ravel( _HX )).T
    #
    if "SimulatedObservationAtCurrentState" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentState"].append( _HX )
    #
    if numpy.any(numpy.isnan(_HX)):
        # A failed simulation propagates NaN into the cost values.
        Jb, Jo, J = numpy.nan, numpy.nan, numpy.nan
    else:
        _Y = numpy.asmatrix(numpy.ravel( _Y )).T
        if _QM in ["AugmentedWeightedLeastSquares", "AWLS", "AugmentedPonderatedLeastSquares", "APLS", "DA"]:
            if _BI is None or _RI is None:
                raise ValueError("Background and Observation error covariance matrix has to be properly defined!")
            _Xb = numpy.asmatrix(numpy.ravel( _Xb )).T
            Jb  = 0.5 * (_X - _Xb).T * _BI * (_X - _Xb)
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["WeightedLeastSquares", "WLS", "PonderatedLeastSquares", "PLS"]:
            if _RI is None:
                raise ValueError("Observation error covariance matrix has to be properly defined!")
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * _RI * (_Y - _HX)
        elif _QM in ["LeastSquares", "LS", "L2"]:
            Jb  = 0.
            Jo  = 0.5 * (_Y - _HX).T * (_Y - _HX)
        elif _QM in ["AbsoluteValue", "L1"]:
            Jb  = 0.
            Jo  = numpy.sum( numpy.abs(_Y - _HX) )
        elif _QM in ["MaximumError", "ME"]:
            Jb  = 0.
            Jo  = numpy.max( numpy.abs(_Y - _HX) )
        elif _QM in ["QR", "Null"]:
            Jb  = 0.
            Jo  = 0.
        else:
            raise ValueError("Unknown asked quality measure!")
        #
        J   = float( Jb ) + float( Jo )
    #
    if _sSc:
        _SSV["CostFunctionJb"].append( Jb )
        _SSV["CostFunctionJo"].append( Jo )
        _SSV["CostFunctionJ" ].append( J )
    #
    if "IndexOfOptimum" in _SSC or \
       "CurrentOptimum" in _SSC or \
       "SimulatedObservationAtCurrentOptimum" in _SSC:
        IndexMin = numpy.argmin( _SSV["CostFunctionJ"][_nPS:] ) + _nPS
    if "IndexOfOptimum" in _SSC:
        _SSV["IndexOfOptimum"].append( IndexMin )
    if "CurrentOptimum" in _SSC:
        _SSV["CurrentOptimum"].append( _SSV["CurrentState"][IndexMin] )
    if "SimulatedObservationAtCurrentOptimum" in _SSC:
        _SSV["SimulatedObservationAtCurrentOptimum"].append( _SSV["SimulatedObservationAtCurrentState"][IndexMin] )
    #
    if _fRt:
        return _SSV
    else:
        if _QM in ["QR"]: # For QuantileRegression
            return _HX
        else:
            return J
1922 # ==============================================================================
# Self-diagnostic banner when the module is executed directly.
if __name__ == "__main__":
    print('\n AUTODIAGNOSTIC \n')